xref: /openbsd-src/sys/dev/pci/drm/amd/display/amdgpu_dm/amdgpu_dm.c (revision a5429850edcc9dd5646cc8ddb251ed22eba08b09)
1 /*
2  * Copyright 2015 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  * Authors: AMD
23  *
24  */
25 
26 /* The caprices of the preprocessor require that this be declared right here */
27 #define CREATE_TRACE_POINTS
28 
29 #include "dm_services_types.h"
30 #include "dc.h"
31 #include "dc_link_dp.h"
32 #include "dc/inc/core_types.h"
33 #include "dal_asic_id.h"
34 #include "dmub/dmub_srv.h"
35 #include "dc/inc/hw/dmcu.h"
36 #include "dc/inc/hw/abm.h"
37 #include "dc/dc_dmub_srv.h"
38 #include "dc/dc_edid_parser.h"
39 #include "dc/dc_stat.h"
40 #include "amdgpu_dm_trace.h"
41 
42 #include "vid.h"
43 #include "amdgpu.h"
44 #include "amdgpu_display.h"
45 #include "amdgpu_ucode.h"
46 #include "atom.h"
47 #include "amdgpu_dm.h"
48 #ifdef CONFIG_DRM_AMD_DC_HDCP
49 #include "amdgpu_dm_hdcp.h"
50 #include <drm/drm_hdcp.h>
51 #endif
52 #include "amdgpu_pm.h"
53 
54 #include "amd_shared.h"
55 #include "amdgpu_dm_irq.h"
56 #include "dm_helpers.h"
57 #include "amdgpu_dm_mst_types.h"
58 #if defined(CONFIG_DEBUG_FS)
59 #include "amdgpu_dm_debugfs.h"
60 #endif
61 #include "amdgpu_dm_psr.h"
62 
63 #include "ivsrcid/ivsrcid_vislands30.h"
64 
65 #include "i2caux_interface.h"
66 #include <linux/module.h>
67 #include <linux/moduleparam.h>
68 #include <linux/types.h>
69 #include <linux/pm_runtime.h>
70 #include <linux/pci.h>
71 #include <linux/firmware.h>
72 #include <linux/component.h>
73 #include <linux/dmi.h>
74 
75 #include <drm/drm_atomic.h>
76 #include <drm/drm_atomic_uapi.h>
77 #include <drm/drm_atomic_helper.h>
78 #include <drm/drm_dp_mst_helper.h>
79 #include <drm/drm_fb_helper.h>
80 #include <drm/drm_fourcc.h>
81 #include <drm/drm_edid.h>
82 #include <drm/drm_vblank.h>
83 #include <drm/drm_audio_component.h>
84 
85 #if defined(CONFIG_DRM_AMD_DC_DCN)
86 #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h"
87 
88 #include "dcn/dcn_1_0_offset.h"
89 #include "dcn/dcn_1_0_sh_mask.h"
90 #include "soc15_hw_ip.h"
91 #include "vega10_ip_offset.h"
92 
93 #include "soc15_common.h"
94 #endif
95 
96 #include "modules/inc/mod_freesync.h"
97 #include "modules/power/power_helpers.h"
98 #include "modules/inc/mod_info_packet.h"
99 
100 #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin"
101 MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
102 #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin"
103 MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB);
104 #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin"
105 MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB);
106 #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin"
107 MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB);
108 #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin"
109 MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB);
110 #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin"
111 MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB);
112 #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin"
113 MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB);
114 #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin"
115 MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB);
116 
117 #define FIRMWARE_RAVEN_DMCU		"amdgpu/raven_dmcu.bin"
118 MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
119 
120 #define FIRMWARE_NAVI12_DMCU            "amdgpu/navi12_dmcu.bin"
121 MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
122 
123 /* Number of bytes in PSP header for firmware. */
124 #define PSP_HEADER_BYTES 0x100
125 
126 /* Number of bytes in PSP footer for firmware. */
127 #define PSP_FOOTER_BYTES 0x100
128 
129 /**
130  * DOC: overview
131  *
132  * The AMDgpu display manager, **amdgpu_dm** (or even simpler,
133  * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM
134  * requests into DC requests, and DC responses into DRM responses.
135  *
136  * The root control structure is &struct amdgpu_display_manager.
137  */
138 
139 /* basic init/fini API */
140 static int amdgpu_dm_init(struct amdgpu_device *adev);
141 static void amdgpu_dm_fini(struct amdgpu_device *adev);
142 static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector);
143 
144 static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link)
145 {
146 	switch (link->dpcd_caps.dongle_type) {
147 	case DISPLAY_DONGLE_NONE:
148 		return DRM_MODE_SUBCONNECTOR_Native;
149 	case DISPLAY_DONGLE_DP_VGA_CONVERTER:
150 		return DRM_MODE_SUBCONNECTOR_VGA;
151 	case DISPLAY_DONGLE_DP_DVI_CONVERTER:
152 	case DISPLAY_DONGLE_DP_DVI_DONGLE:
153 		return DRM_MODE_SUBCONNECTOR_DVID;
154 	case DISPLAY_DONGLE_DP_HDMI_CONVERTER:
155 	case DISPLAY_DONGLE_DP_HDMI_DONGLE:
156 		return DRM_MODE_SUBCONNECTOR_HDMIA;
157 	case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE:
158 	default:
159 		return DRM_MODE_SUBCONNECTOR_Unknown;
160 	}
161 }
162 
163 static void update_subconnector_property(struct amdgpu_dm_connector *aconnector)
164 {
165 	struct dc_link *link = aconnector->dc_link;
166 	struct drm_connector *connector = &aconnector->base;
167 	enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown;
168 
169 	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
170 		return;
171 
172 	if (aconnector->dc_sink)
173 		subconnector = get_subconnector_type(link);
174 
175 	drm_object_property_set_value(&connector->base,
176 			connector->dev->mode_config.dp_subconnector_property,
177 			subconnector);
178 }
179 
180 /*
181  * initializes drm_device display related structures, based on the information
182  * provided by DAL. The drm strcutures are: drm_crtc, drm_connector,
183  * drm_encoder, drm_mode_config
184  *
185  * Returns 0 on success
186  */
187 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
188 /* removes and deallocates the drm structures, created by the above function */
189 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
190 
191 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
192 				struct drm_plane *plane,
193 				unsigned long possible_crtcs,
194 				const struct dc_plane_cap *plane_cap);
195 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
196 			       struct drm_plane *plane,
197 			       uint32_t link_index);
198 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
199 				    struct amdgpu_dm_connector *amdgpu_dm_connector,
200 				    uint32_t link_index,
201 				    struct amdgpu_encoder *amdgpu_encoder);
202 static int amdgpu_dm_encoder_init(struct drm_device *dev,
203 				  struct amdgpu_encoder *aencoder,
204 				  uint32_t link_index);
205 
206 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector);
207 
208 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state);
209 
210 static int amdgpu_dm_atomic_check(struct drm_device *dev,
211 				  struct drm_atomic_state *state);
212 
213 static void handle_cursor_update(struct drm_plane *plane,
214 				 struct drm_plane_state *old_plane_state);
215 
216 static const struct drm_format_info *
217 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd);
218 
219 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector);
220 
221 static bool
222 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
223 				 struct drm_crtc_state *new_crtc_state);
224 /*
225  * dm_vblank_get_counter
226  *
227  * @brief
228  * Get counter for number of vertical blanks
229  *
230  * @param
231  * struct amdgpu_device *adev - [in] desired amdgpu device
232  * int disp_idx - [in] which CRTC to get the counter from
233  *
234  * @return
235  * Counter for vertical blanks
236  */
237 static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc)
238 {
239 	if (crtc >= adev->mode_info.num_crtc)
240 		return 0;
241 	else {
242 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
243 
244 		if (acrtc->dm_irq_params.stream == NULL) {
245 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
246 				  crtc);
247 			return 0;
248 		}
249 
250 		return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream);
251 	}
252 }
253 
254 static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
255 				  u32 *vbl, u32 *position)
256 {
257 	uint32_t v_blank_start, v_blank_end, h_position, v_position;
258 
259 	if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
260 		return -EINVAL;
261 	else {
262 		struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc];
263 
264 		if (acrtc->dm_irq_params.stream ==  NULL) {
265 			DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n",
266 				  crtc);
267 			return 0;
268 		}
269 
270 		/*
271 		 * TODO rework base driver to use values directly.
272 		 * for now parse it back into reg-format
273 		 */
274 		dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream,
275 					 &v_blank_start,
276 					 &v_blank_end,
277 					 &h_position,
278 					 &v_position);
279 
280 		*position = v_position | (h_position << 16);
281 		*vbl = v_blank_start | (v_blank_end << 16);
282 	}
283 
284 	return 0;
285 }
286 
287 static bool dm_is_idle(void *handle)
288 {
289 	/* XXX todo */
290 	return true;
291 }
292 
293 static int dm_wait_for_idle(void *handle)
294 {
295 	/* XXX todo */
296 	return 0;
297 }
298 
299 static bool dm_check_soft_reset(void *handle)
300 {
301 	return false;
302 }
303 
304 static int dm_soft_reset(void *handle)
305 {
306 	/* XXX todo */
307 	return 0;
308 }
309 
310 static struct amdgpu_crtc *
311 get_crtc_by_otg_inst(struct amdgpu_device *adev,
312 		     int otg_inst)
313 {
314 	struct drm_device *dev = adev_to_drm(adev);
315 	struct drm_crtc *crtc;
316 	struct amdgpu_crtc *amdgpu_crtc;
317 
318 	if (WARN_ON(otg_inst == -1))
319 		return adev->mode_info.crtcs[0];
320 
321 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
322 		amdgpu_crtc = to_amdgpu_crtc(crtc);
323 
324 		if (amdgpu_crtc->otg_inst == otg_inst)
325 			return amdgpu_crtc;
326 	}
327 
328 	return NULL;
329 }
330 
331 static inline bool amdgpu_dm_vrr_active_irq(struct amdgpu_crtc *acrtc)
332 {
333 	return acrtc->dm_irq_params.freesync_config.state ==
334 		       VRR_STATE_ACTIVE_VARIABLE ||
335 	       acrtc->dm_irq_params.freesync_config.state ==
336 		       VRR_STATE_ACTIVE_FIXED;
337 }
338 
339 static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state)
340 {
341 	return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE ||
342 	       dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
343 }
344 
345 static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state,
346 					      struct dm_crtc_state *new_state)
347 {
348 	if (new_state->freesync_config.state ==  VRR_STATE_ACTIVE_FIXED)
349 		return true;
350 	else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state))
351 		return true;
352 	else
353 		return false;
354 }
355 
356 /**
357  * dm_pflip_high_irq() - Handle pageflip interrupt
358  * @interrupt_params: ignored
359  *
360  * Handles the pageflip interrupt by notifying all interested parties
361  * that the pageflip has been completed.
362  */
363 static void dm_pflip_high_irq(void *interrupt_params)
364 {
365 	struct amdgpu_crtc *amdgpu_crtc;
366 	struct common_irq_params *irq_params = interrupt_params;
367 	struct amdgpu_device *adev = irq_params->adev;
368 	unsigned long flags;
369 	struct drm_pending_vblank_event *e;
370 	uint32_t vpos, hpos, v_blank_start, v_blank_end;
371 	bool vrr_active;
372 
373 	amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP);
374 
375 	/* IRQ could occur when in initial stage */
376 	/* TODO work and BO cleanup */
377 	if (amdgpu_crtc == NULL) {
378 		DC_LOG_PFLIP("CRTC is null, returning.\n");
379 		return;
380 	}
381 
382 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
383 
384 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED){
385 		DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p] \n",
386 						 amdgpu_crtc->pflip_status,
387 						 AMDGPU_FLIP_SUBMITTED,
388 						 amdgpu_crtc->crtc_id,
389 						 amdgpu_crtc);
390 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
391 		return;
392 	}
393 
394 	/* page flip completed. */
395 	e = amdgpu_crtc->event;
396 	amdgpu_crtc->event = NULL;
397 
398 	WARN_ON(!e);
399 
400 	vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc);
401 
402 	/* Fixed refresh rate, or VRR scanout position outside front-porch? */
403 	if (!vrr_active ||
404 	    !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start,
405 				      &v_blank_end, &hpos, &vpos) ||
406 	    (vpos < v_blank_start)) {
407 		/* Update to correct count and vblank timestamp if racing with
408 		 * vblank irq. This also updates to the correct vblank timestamp
409 		 * even in VRR mode, as scanout is past the front-porch atm.
410 		 */
411 		drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
412 
413 		/* Wake up userspace by sending the pageflip event with proper
414 		 * count and timestamp of vblank of flip completion.
415 		 */
416 		if (e) {
417 			drm_crtc_send_vblank_event(&amdgpu_crtc->base, e);
418 
419 			/* Event sent, so done with vblank for this flip */
420 			drm_crtc_vblank_put(&amdgpu_crtc->base);
421 		}
422 	} else if (e) {
423 		/* VRR active and inside front-porch: vblank count and
424 		 * timestamp for pageflip event will only be up to date after
425 		 * drm_crtc_handle_vblank() has been executed from late vblank
426 		 * irq handler after start of back-porch (vline 0). We queue the
427 		 * pageflip event for send-out by drm_crtc_handle_vblank() with
428 		 * updated timestamp and count, once it runs after us.
429 		 *
430 		 * We need to open-code this instead of using the helper
431 		 * drm_crtc_arm_vblank_event(), as that helper would
432 		 * call drm_crtc_accurate_vblank_count(), which we must
433 		 * not call in VRR mode while we are in front-porch!
434 		 */
435 
436 		/* sequence will be replaced by real count during send-out. */
437 		e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base);
438 		e->pipe = amdgpu_crtc->crtc_id;
439 
440 		list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list);
441 		e = NULL;
442 	}
443 
444 	/* Keep track of vblank of this flip for flip throttling. We use the
445 	 * cooked hw counter, as that one incremented at start of this vblank
446 	 * of pageflip completion, so last_flip_vblank is the forbidden count
447 	 * for queueing new pageflips if vsync + VRR is enabled.
448 	 */
449 	amdgpu_crtc->dm_irq_params.last_flip_vblank =
450 		amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
451 
452 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
453 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
454 
455 	DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n",
456 		     amdgpu_crtc->crtc_id, amdgpu_crtc,
457 		     vrr_active, (int) !e);
458 }
459 
460 static void dm_vupdate_high_irq(void *interrupt_params)
461 {
462 	struct common_irq_params *irq_params = interrupt_params;
463 	struct amdgpu_device *adev = irq_params->adev;
464 	struct amdgpu_crtc *acrtc;
465 	struct drm_device *drm_dev;
466 	struct drm_vblank_crtc *vblank;
467 	ktime_t frame_duration_ns, previous_timestamp;
468 	unsigned long flags;
469 	int vrr_active;
470 
471 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE);
472 
473 	if (acrtc) {
474 		vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
475 		drm_dev = acrtc->base.dev;
476 		vblank = &drm_dev->vblank[acrtc->base.index];
477 		previous_timestamp = atomic64_read(&irq_params->previous_timestamp);
478 		frame_duration_ns = vblank->time - previous_timestamp;
479 
480 		if (frame_duration_ns > 0) {
481 			trace_amdgpu_refresh_rate_track(acrtc->base.index,
482 						frame_duration_ns,
483 						ktime_divns(NSEC_PER_SEC, frame_duration_ns));
484 			atomic64_set(&irq_params->previous_timestamp, vblank->time);
485 		}
486 
487 		DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n",
488 			      acrtc->crtc_id,
489 			      vrr_active);
490 
491 		/* Core vblank handling is done here after end of front-porch in
492 		 * vrr mode, as vblank timestamping will give valid results
493 		 * while now done after front-porch. This will also deliver
494 		 * page-flip completion events that have been queued to us
495 		 * if a pageflip happened inside front-porch.
496 		 */
497 		if (vrr_active) {
498 			drm_crtc_handle_vblank(&acrtc->base);
499 
500 			/* BTR processing for pre-DCE12 ASICs */
501 			if (acrtc->dm_irq_params.stream &&
502 			    adev->family < AMDGPU_FAMILY_AI) {
503 				spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
504 				mod_freesync_handle_v_update(
505 				    adev->dm.freesync_module,
506 				    acrtc->dm_irq_params.stream,
507 				    &acrtc->dm_irq_params.vrr_params);
508 
509 				dc_stream_adjust_vmin_vmax(
510 				    adev->dm.dc,
511 				    acrtc->dm_irq_params.stream,
512 				    &acrtc->dm_irq_params.vrr_params.adjust);
513 				spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
514 			}
515 		}
516 	}
517 }
518 
519 /**
520  * dm_crtc_high_irq() - Handles CRTC interrupt
521  * @interrupt_params: used for determining the CRTC instance
522  *
523  * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK
524  * event handler.
525  */
526 static void dm_crtc_high_irq(void *interrupt_params)
527 {
528 	struct common_irq_params *irq_params = interrupt_params;
529 	struct amdgpu_device *adev = irq_params->adev;
530 	struct amdgpu_crtc *acrtc;
531 	unsigned long flags;
532 	int vrr_active;
533 
534 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK);
535 	if (!acrtc)
536 		return;
537 
538 	vrr_active = amdgpu_dm_vrr_active_irq(acrtc);
539 
540 	DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id,
541 		      vrr_active, acrtc->dm_irq_params.active_planes);
542 
543 	/**
544 	 * Core vblank handling at start of front-porch is only possible
545 	 * in non-vrr mode, as only there vblank timestamping will give
546 	 * valid results while done in front-porch. Otherwise defer it
547 	 * to dm_vupdate_high_irq after end of front-porch.
548 	 */
549 	if (!vrr_active)
550 		drm_crtc_handle_vblank(&acrtc->base);
551 
552 	/**
553 	 * Following stuff must happen at start of vblank, for crc
554 	 * computation and below-the-range btr support in vrr mode.
555 	 */
556 	amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
557 
558 	/* BTR updates need to happen before VUPDATE on Vega and above. */
559 	if (adev->family < AMDGPU_FAMILY_AI)
560 		return;
561 
562 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
563 
564 	if (acrtc->dm_irq_params.stream &&
565 	    acrtc->dm_irq_params.vrr_params.supported &&
566 	    acrtc->dm_irq_params.freesync_config.state ==
567 		    VRR_STATE_ACTIVE_VARIABLE) {
568 		mod_freesync_handle_v_update(adev->dm.freesync_module,
569 					     acrtc->dm_irq_params.stream,
570 					     &acrtc->dm_irq_params.vrr_params);
571 
572 		dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream,
573 					   &acrtc->dm_irq_params.vrr_params.adjust);
574 	}
575 
576 	/*
577 	 * If there aren't any active_planes then DCH HUBP may be clock-gated.
578 	 * In that case, pageflip completion interrupts won't fire and pageflip
579 	 * completion events won't get delivered. Prevent this by sending
580 	 * pending pageflip events from here if a flip is still pending.
581 	 *
582 	 * If any planes are enabled, use dm_pflip_high_irq() instead, to
583 	 * avoid race conditions between flip programming and completion,
584 	 * which could cause too early flip completion events.
585 	 */
586 	if (adev->family >= AMDGPU_FAMILY_RV &&
587 	    acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED &&
588 	    acrtc->dm_irq_params.active_planes == 0) {
589 		if (acrtc->event) {
590 			drm_crtc_send_vblank_event(&acrtc->base, acrtc->event);
591 			acrtc->event = NULL;
592 			drm_crtc_vblank_put(&acrtc->base);
593 		}
594 		acrtc->pflip_status = AMDGPU_FLIP_NONE;
595 	}
596 
597 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
598 }
599 
600 #if defined(CONFIG_DRM_AMD_DC_DCN)
601 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
602 /**
603  * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for
604  * DCN generation ASICs
605  * @interrupt_params: interrupt parameters
606  *
607  * Used to set crc window/read out crc value at vertical line 0 position
608  */
609 static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params)
610 {
611 	struct common_irq_params *irq_params = interrupt_params;
612 	struct amdgpu_device *adev = irq_params->adev;
613 	struct amdgpu_crtc *acrtc;
614 
615 	acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0);
616 
617 	if (!acrtc)
618 		return;
619 
620 	amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base);
621 }
622 #endif
623 
624 /**
625  * dmub_aux_setconfig_reply_callback - Callback for AUX or SET_CONFIG command.
626  * @adev: amdgpu_device pointer
627  * @notify: dmub notification structure
628  *
629  * Dmub AUX or SET_CONFIG command completion processing callback
630  * Copies dmub notification to DM which is to be read by AUX command.
631  * issuing thread and also signals the event to wake up the thread.
632  */
633 void dmub_aux_setconfig_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
634 {
635 	if (adev->dm.dmub_notify)
636 		memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification));
637 	if (notify->type == DMUB_NOTIFICATION_AUX_REPLY)
638 		complete(&adev->dm.dmub_aux_transfer_done);
639 }
640 
641 /**
642  * dmub_hpd_callback - DMUB HPD interrupt processing callback.
643  * @adev: amdgpu_device pointer
644  * @notify: dmub notification structure
645  *
646  * Dmub Hpd interrupt processing callback. Gets displayindex through the
647  * ink index and calls helper to do the processing.
648  */
649 void dmub_hpd_callback(struct amdgpu_device *adev, struct dmub_notification *notify)
650 {
651 	struct amdgpu_dm_connector *aconnector;
652 	struct drm_connector *connector;
653 	struct drm_connector_list_iter iter;
654 	struct dc_link *link;
655 	uint8_t link_index = 0;
656 	struct drm_device *dev;
657 
658 	if (adev == NULL)
659 		return;
660 
661 	if (notify == NULL) {
662 		DRM_ERROR("DMUB HPD callback notification was NULL");
663 		return;
664 	}
665 
666 	if (notify->link_index > adev->dm.dc->link_count) {
667 		DRM_ERROR("DMUB HPD index (%u)is abnormal", notify->link_index);
668 		return;
669 	}
670 
671 	link_index = notify->link_index;
672 	link = adev->dm.dc->links[link_index];
673 	dev = adev->dm.ddev;
674 
675 	drm_connector_list_iter_begin(dev, &iter);
676 	drm_for_each_connector_iter(connector, &iter) {
677 		aconnector = to_amdgpu_dm_connector(connector);
678 		if (link && aconnector->dc_link == link) {
679 			DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index);
680 			handle_hpd_irq_helper(aconnector);
681 			break;
682 		}
683 	}
684 	drm_connector_list_iter_end(&iter);
685 
686 }
687 
688 /**
689  * register_dmub_notify_callback - Sets callback for DMUB notify
690  * @adev: amdgpu_device pointer
691  * @type: Type of dmub notification
692  * @callback: Dmub interrupt callback function
693  * @dmub_int_thread_offload: offload indicator
694  *
695  * API to register a dmub callback handler for a dmub notification
696  * Also sets indicator whether callback processing to be offloaded.
697  * to dmub interrupt handling thread
698  * Return: true if successfully registered, false if there is existing registration
699  */
700 bool register_dmub_notify_callback(struct amdgpu_device *adev, enum dmub_notification_type type,
701 dmub_notify_interrupt_callback_t callback, bool dmub_int_thread_offload)
702 {
703 	if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) {
704 		adev->dm.dmub_callback[type] = callback;
705 		adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload;
706 	} else
707 		return false;
708 
709 	return true;
710 }
711 
712 static void dm_handle_hpd_work(struct work_struct *work)
713 {
714 	struct dmub_hpd_work *dmub_hpd_wrk;
715 
716 	dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work);
717 
718 	if (!dmub_hpd_wrk->dmub_notify) {
719 		DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL");
720 		return;
721 	}
722 
723 	if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) {
724 		dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev,
725 		dmub_hpd_wrk->dmub_notify);
726 	}
727 	kfree(dmub_hpd_wrk);
728 
729 }
730 
731 #define DMUB_TRACE_MAX_READ 64
732 /**
733  * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt
734  * @interrupt_params: used for determining the Outbox instance
735  *
736  * Handles the Outbox Interrupt
737  * event handler.
738  */
739 static void dm_dmub_outbox1_low_irq(void *interrupt_params)
740 {
741 	struct dmub_notification notify;
742 	struct common_irq_params *irq_params = interrupt_params;
743 	struct amdgpu_device *adev = irq_params->adev;
744 	struct amdgpu_display_manager *dm = &adev->dm;
745 	struct dmcub_trace_buf_entry entry = { 0 };
746 	uint32_t count = 0;
747 	struct dmub_hpd_work *dmub_hpd_wrk;
748 
749 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
750 		dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC);
751 		if (!dmub_hpd_wrk) {
752 			DRM_ERROR("Failed to allocate dmub_hpd_wrk");
753 			return;
754 		}
755 		INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work);
756 
757 		if (irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) {
758 			do {
759 				dc_stat_get_dmub_notification(adev->dm.dc, &notify);
760 				if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) {
761 					DRM_ERROR("DM: notify type %d larger than the array size %zu!", notify.type,
762 					ARRAY_SIZE(dm->dmub_thread_offload));
763 					continue;
764 				}
765 				if (dm->dmub_thread_offload[notify.type] == true) {
766 					dmub_hpd_wrk->dmub_notify = &notify;
767 					dmub_hpd_wrk->adev = adev;
768 					queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work);
769 				} else {
770 					dm->dmub_callback[notify.type](adev, &notify);
771 				}
772 
773 			} while (notify.pending_notification);
774 
775 		} else {
776 			DRM_ERROR("DM: Failed to receive correct outbox IRQ !");
777 		}
778 	}
779 
780 
781 	do {
782 		if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) {
783 			trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count,
784 							entry.param0, entry.param1);
785 
786 			DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n",
787 				 entry.trace_code, entry.tick_count, entry.param0, entry.param1);
788 		} else
789 			break;
790 
791 		count++;
792 
793 	} while (count <= DMUB_TRACE_MAX_READ);
794 
795 	ASSERT(count <= DMUB_TRACE_MAX_READ);
796 }
797 #endif
798 
799 static int dm_set_clockgating_state(void *handle,
800 		  enum amd_clockgating_state state)
801 {
802 	return 0;
803 }
804 
805 static int dm_set_powergating_state(void *handle,
806 		  enum amd_powergating_state state)
807 {
808 	return 0;
809 }
810 
811 /* Prototypes of private functions */
812 static int dm_early_init(void* handle);
813 
814 /* Allocate memory for FBC compressed data  */
815 static void amdgpu_dm_fbc_init(struct drm_connector *connector)
816 {
817 	struct drm_device *dev = connector->dev;
818 	struct amdgpu_device *adev = drm_to_adev(dev);
819 	struct dm_compressor_info *compressor = &adev->dm.compressor;
820 	struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector);
821 	struct drm_display_mode *mode;
822 	unsigned long max_size = 0;
823 
824 	if (adev->dm.dc->fbc_compressor == NULL)
825 		return;
826 
827 	if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP)
828 		return;
829 
830 	if (compressor->bo_ptr)
831 		return;
832 
833 
834 	list_for_each_entry(mode, &connector->modes, head) {
835 		if (max_size < mode->htotal * mode->vtotal)
836 			max_size = mode->htotal * mode->vtotal;
837 	}
838 
839 	if (max_size) {
840 		int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE,
841 			    AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr,
842 			    &compressor->gpu_addr, &compressor->cpu_addr);
843 
844 		if (r)
845 			DRM_ERROR("DM: Failed to initialize FBC\n");
846 		else {
847 			adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr;
848 			DRM_INFO("DM: FBC alloc %lu\n", max_size*4);
849 		}
850 
851 	}
852 
853 }
854 
855 static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port,
856 					  int pipe, bool *enabled,
857 					  unsigned char *buf, int max_bytes)
858 {
859 	struct drm_device *dev = dev_get_drvdata(kdev);
860 	struct amdgpu_device *adev = drm_to_adev(dev);
861 	struct drm_connector *connector;
862 	struct drm_connector_list_iter conn_iter;
863 	struct amdgpu_dm_connector *aconnector;
864 	int ret = 0;
865 
866 	*enabled = false;
867 
868 	mutex_lock(&adev->dm.audio_lock);
869 
870 	drm_connector_list_iter_begin(dev, &conn_iter);
871 	drm_for_each_connector_iter(connector, &conn_iter) {
872 		aconnector = to_amdgpu_dm_connector(connector);
873 		if (aconnector->audio_inst != port)
874 			continue;
875 
876 		*enabled = true;
877 		ret = drm_eld_size(connector->eld);
878 		memcpy(buf, connector->eld, min(max_bytes, ret));
879 
880 		break;
881 	}
882 	drm_connector_list_iter_end(&conn_iter);
883 
884 	mutex_unlock(&adev->dm.audio_lock);
885 
886 	DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled);
887 
888 	return ret;
889 }
890 
891 static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = {
892 	.get_eld = amdgpu_dm_audio_component_get_eld,
893 };
894 
895 static int amdgpu_dm_audio_component_bind(struct device *kdev,
896 				       struct device *hda_kdev, void *data)
897 {
898 	struct drm_device *dev = dev_get_drvdata(kdev);
899 	struct amdgpu_device *adev = drm_to_adev(dev);
900 	struct drm_audio_component *acomp = data;
901 
902 	acomp->ops = &amdgpu_dm_audio_component_ops;
903 	acomp->dev = kdev;
904 	adev->dm.audio_component = acomp;
905 
906 	return 0;
907 }
908 
909 static void amdgpu_dm_audio_component_unbind(struct device *kdev,
910 					  struct device *hda_kdev, void *data)
911 {
912 	struct drm_device *dev = dev_get_drvdata(kdev);
913 	struct amdgpu_device *adev = drm_to_adev(dev);
914 	struct drm_audio_component *acomp = data;
915 
916 	acomp->ops = NULL;
917 	acomp->dev = NULL;
918 	adev->dm.audio_component = NULL;
919 }
920 
921 #ifdef notyet
922 static const struct component_ops amdgpu_dm_audio_component_bind_ops = {
923 	.bind	= amdgpu_dm_audio_component_bind,
924 	.unbind	= amdgpu_dm_audio_component_unbind,
925 };
926 #endif
927 
928 static int amdgpu_dm_audio_init(struct amdgpu_device *adev)
929 {
930 	int i, ret;
931 
932 	if (!amdgpu_audio)
933 		return 0;
934 
935 	adev->mode_info.audio.enabled = true;
936 
937 	adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count;
938 
939 	for (i = 0; i < adev->mode_info.audio.num_pins; i++) {
940 		adev->mode_info.audio.pin[i].channels = -1;
941 		adev->mode_info.audio.pin[i].rate = -1;
942 		adev->mode_info.audio.pin[i].bits_per_sample = -1;
943 		adev->mode_info.audio.pin[i].status_bits = 0;
944 		adev->mode_info.audio.pin[i].category_code = 0;
945 		adev->mode_info.audio.pin[i].connected = false;
946 		adev->mode_info.audio.pin[i].id =
947 			adev->dm.dc->res_pool->audios[i]->inst;
948 		adev->mode_info.audio.pin[i].offset = 0;
949 	}
950 
951 	ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops);
952 	if (ret < 0)
953 		return ret;
954 
955 	adev->dm.audio_registered = true;
956 
957 	return 0;
958 }
959 
960 static void amdgpu_dm_audio_fini(struct amdgpu_device *adev)
961 {
962 	if (!amdgpu_audio)
963 		return;
964 
965 	if (!adev->mode_info.audio.enabled)
966 		return;
967 
968 	if (adev->dm.audio_registered) {
969 		component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops);
970 		adev->dm.audio_registered = false;
971 	}
972 
973 	/* TODO: Disable audio? */
974 
975 	adev->mode_info.audio.enabled = false;
976 }
977 
978 static  void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin)
979 {
980 	struct drm_audio_component *acomp = adev->dm.audio_component;
981 
982 	if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
983 		DRM_DEBUG_KMS("Notify ELD: %d\n", pin);
984 
985 		acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
986 						 pin, -1);
987 	}
988 }
989 
990 static int dm_dmub_hw_init(struct amdgpu_device *adev)
991 {
992 	const struct dmcub_firmware_header_v1_0 *hdr;
993 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
994 	struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
995 	const struct firmware *dmub_fw = adev->dm.dmub_fw;
996 	struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu;
997 	struct abm *abm = adev->dm.dc->res_pool->abm;
998 	struct dmub_srv_hw_params hw_params;
999 	enum dmub_status status;
1000 	const unsigned char *fw_inst_const, *fw_bss_data;
1001 	uint32_t i, fw_inst_const_size, fw_bss_data_size;
1002 	bool has_hw_support;
1003 
1004 	if (!dmub_srv)
1005 		/* DMUB isn't supported on the ASIC. */
1006 		return 0;
1007 
1008 	if (!fb_info) {
1009 		DRM_ERROR("No framebuffer info for DMUB service.\n");
1010 		return -EINVAL;
1011 	}
1012 
1013 	if (!dmub_fw) {
1014 		/* Firmware required for DMUB support. */
1015 		DRM_ERROR("No firmware provided for DMUB.\n");
1016 		return -EINVAL;
1017 	}
1018 
1019 	status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support);
1020 	if (status != DMUB_STATUS_OK) {
1021 		DRM_ERROR("Error checking HW support for DMUB: %d\n", status);
1022 		return -EINVAL;
1023 	}
1024 
1025 	if (!has_hw_support) {
1026 		DRM_INFO("DMUB unsupported on ASIC\n");
1027 		return 0;
1028 	}
1029 
1030 	/* Reset DMCUB if it was previously running - before we overwrite its memory. */
1031 	status = dmub_srv_hw_reset(dmub_srv);
1032 	if (status != DMUB_STATUS_OK)
1033 		DRM_WARN("Error resetting DMUB HW: %d\n", status);
1034 
1035 	hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data;
1036 
1037 	fw_inst_const = dmub_fw->data +
1038 			le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1039 			PSP_HEADER_BYTES;
1040 
1041 	fw_bss_data = dmub_fw->data +
1042 		      le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1043 		      le32_to_cpu(hdr->inst_const_bytes);
1044 
1045 	/* Copy firmware and bios info into FB memory. */
1046 	fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1047 			     PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1048 
1049 	fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1050 
1051 	/* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
1052 	 * amdgpu_ucode_init_single_fw will load dmub firmware
1053 	 * fw_inst_const part to cw0; otherwise, the firmware back door load
1054 	 * will be done by dm_dmub_hw_init
1055 	 */
1056 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1057 		memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
1058 				fw_inst_const_size);
1059 	}
1060 
1061 	if (fw_bss_data_size)
1062 		memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr,
1063 		       fw_bss_data, fw_bss_data_size);
1064 
1065 	/* Copy firmware bios info into FB memory. */
1066 	memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
1067 	       adev->bios_size);
1068 
1069 	/* Reset regions that need to be reset. */
1070 	memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0,
1071 	fb_info->fb[DMUB_WINDOW_4_MAILBOX].size);
1072 
1073 	memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0,
1074 	       fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size);
1075 
1076 	memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0,
1077 	       fb_info->fb[DMUB_WINDOW_6_FW_STATE].size);
1078 
1079 	/* Initialize hardware. */
1080 	memset(&hw_params, 0, sizeof(hw_params));
1081 	hw_params.fb_base = adev->gmc.fb_start;
1082 	hw_params.fb_offset = adev->gmc.aper_base;
1083 
1084 	/* backdoor load firmware and trigger dmub running */
1085 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
1086 		hw_params.load_inst_const = true;
1087 
1088 	if (dmcu)
1089 		hw_params.psp_version = dmcu->psp_version;
1090 
1091 	for (i = 0; i < fb_info->num_fb; ++i)
1092 		hw_params.fb[i] = &fb_info->fb[i];
1093 
1094 	status = dmub_srv_hw_init(dmub_srv, &hw_params);
1095 	if (status != DMUB_STATUS_OK) {
1096 		DRM_ERROR("Error initializing DMUB HW: %d\n", status);
1097 		return -EINVAL;
1098 	}
1099 
1100 	/* Wait for firmware load to finish. */
1101 	status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1102 	if (status != DMUB_STATUS_OK)
1103 		DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1104 
1105 	/* Init DMCU and ABM if available. */
1106 	if (dmcu && abm) {
1107 		dmcu->funcs->dmcu_init(dmcu);
1108 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1109 	}
1110 
1111 	if (!adev->dm.dc->ctx->dmub_srv)
1112 		adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv);
1113 	if (!adev->dm.dc->ctx->dmub_srv) {
1114 		DRM_ERROR("Couldn't allocate DC DMUB server!\n");
1115 		return -ENOMEM;
1116 	}
1117 
1118 	DRM_INFO("DMUB hardware initialized: version=0x%08X\n",
1119 		 adev->dm.dmcub_fw_version);
1120 
1121 	return 0;
1122 }
1123 
1124 static void dm_dmub_hw_resume(struct amdgpu_device *adev)
1125 {
1126 	struct dmub_srv *dmub_srv = adev->dm.dmub_srv;
1127 	enum dmub_status status;
1128 	bool init;
1129 
1130 	if (!dmub_srv) {
1131 		/* DMUB isn't supported on the ASIC. */
1132 		return;
1133 	}
1134 
1135 	status = dmub_srv_is_hw_init(dmub_srv, &init);
1136 	if (status != DMUB_STATUS_OK)
1137 		DRM_WARN("DMUB hardware init check failed: %d\n", status);
1138 
1139 	if (status == DMUB_STATUS_OK && init) {
1140 		/* Wait for firmware load to finish. */
1141 		status = dmub_srv_wait_for_auto_load(dmub_srv, 100000);
1142 		if (status != DMUB_STATUS_OK)
1143 			DRM_WARN("Wait for DMUB auto-load failed: %d\n", status);
1144 	} else {
1145 		/* Perform the full hardware initialization. */
1146 		dm_dmub_hw_init(adev);
1147 	}
1148 }
1149 
1150 #if defined(CONFIG_DRM_AMD_DC_DCN)
1151 static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config)
1152 {
1153 	uint64_t pt_base;
1154 	uint32_t logical_addr_low;
1155 	uint32_t logical_addr_high;
1156 	uint32_t agp_base, agp_bot, agp_top;
1157 	PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base;
1158 
1159 	memset(pa_config, 0, sizeof(*pa_config));
1160 
1161 	logical_addr_low  = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18;
1162 	pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);
1163 
1164 	if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1165 		/*
1166 		 * Raven2 has a HW issue that it is unable to use the vram which
1167 		 * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the
1168 		 * workaround that increase system aperture high address (add 1)
1169 		 * to get rid of the VM fault and hardware hang.
1170 		 */
1171 		logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18);
1172 	else
1173 		logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18;
1174 
1175 	agp_base = 0;
1176 	agp_bot = adev->gmc.agp_start >> 24;
1177 	agp_top = adev->gmc.agp_end >> 24;
1178 
1179 
1180 	page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF;
1181 	page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12);
1182 	page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF;
1183 	page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12);
1184 	page_table_base.high_part = upper_32_bits(pt_base) & 0xF;
1185 	page_table_base.low_part = lower_32_bits(pt_base);
1186 
1187 	pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18;
1188 	pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18;
1189 
1190 	pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24 ;
1191 	pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24;
1192 	pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24;
1193 
1194 	pa_config->system_aperture.fb_base = adev->gmc.fb_start;
1195 	pa_config->system_aperture.fb_offset = adev->gmc.aper_base;
1196 	pa_config->system_aperture.fb_top = adev->gmc.fb_end;
1197 
1198 	pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12;
1199 	pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12;
1200 	pa_config->gart_config.page_table_base_addr = page_table_base.quad_part;
1201 
1202 	pa_config->is_hvm_enabled = 0;
1203 
1204 }
1205 #endif
1206 #if defined(CONFIG_DRM_AMD_DC_DCN)
1207 static void vblank_control_worker(struct work_struct *work)
1208 {
1209 	struct vblank_control_work *vblank_work =
1210 		container_of(work, struct vblank_control_work, work);
1211 	struct amdgpu_display_manager *dm = vblank_work->dm;
1212 
1213 	mutex_lock(&dm->dc_lock);
1214 
1215 	if (vblank_work->enable)
1216 		dm->active_vblank_irq_count++;
1217 	else if(dm->active_vblank_irq_count)
1218 		dm->active_vblank_irq_count--;
1219 
1220 	dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0);
1221 
1222 	DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0);
1223 
1224 	/* Control PSR based on vblank requirements from OS */
1225 	if (vblank_work->stream && vblank_work->stream->link) {
1226 		if (vblank_work->enable) {
1227 			if (vblank_work->stream->link->psr_settings.psr_allow_active)
1228 				amdgpu_dm_psr_disable(vblank_work->stream);
1229 		} else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
1230 			   !vblank_work->stream->link->psr_settings.psr_allow_active &&
1231 			   vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
1232 			amdgpu_dm_psr_enable(vblank_work->stream);
1233 		}
1234 	}
1235 
1236 	mutex_unlock(&dm->dc_lock);
1237 
1238 	dc_stream_release(vblank_work->stream);
1239 
1240 	kfree(vblank_work);
1241 }
1242 
1243 #endif
1244 
1245 static void dm_handle_hpd_rx_offload_work(struct work_struct *work)
1246 {
1247 	struct hpd_rx_irq_offload_work *offload_work;
1248 	struct amdgpu_dm_connector *aconnector;
1249 	struct dc_link *dc_link;
1250 	struct amdgpu_device *adev;
1251 	enum dc_connection_type new_connection_type = dc_connection_none;
1252 	unsigned long flags;
1253 
1254 	offload_work = container_of(work, struct hpd_rx_irq_offload_work, work);
1255 	aconnector = offload_work->offload_wq->aconnector;
1256 
1257 	if (!aconnector) {
1258 		DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work");
1259 		goto skip;
1260 	}
1261 
1262 	adev = drm_to_adev(aconnector->base.dev);
1263 	dc_link = aconnector->dc_link;
1264 
1265 	mutex_lock(&aconnector->hpd_lock);
1266 	if (!dc_link_detect_sink(dc_link, &new_connection_type))
1267 		DRM_ERROR("KMS: Failed to detect connector\n");
1268 	mutex_unlock(&aconnector->hpd_lock);
1269 
1270 	if (new_connection_type == dc_connection_none)
1271 		goto skip;
1272 
1273 	if (amdgpu_in_reset(adev))
1274 		goto skip;
1275 
1276 	mutex_lock(&adev->dm.dc_lock);
1277 	if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST)
1278 		dc_link_dp_handle_automated_test(dc_link);
1279 	else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) &&
1280 			hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) &&
1281 			dc_link_dp_allow_hpd_rx_irq(dc_link)) {
1282 		dc_link_dp_handle_link_loss(dc_link);
1283 		spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags);
1284 		offload_work->offload_wq->is_handling_link_loss = false;
1285 		spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags);
1286 	}
1287 	mutex_unlock(&adev->dm.dc_lock);
1288 
1289 skip:
1290 	kfree(offload_work);
1291 
1292 }
1293 
1294 static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc)
1295 {
1296 	int max_caps = dc->caps.max_links;
1297 	int i = 0;
1298 	struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL;
1299 
1300 	hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL);
1301 
1302 	if (!hpd_rx_offload_wq)
1303 		return NULL;
1304 
1305 
1306 	for (i = 0; i < max_caps; i++) {
1307 		hpd_rx_offload_wq[i].wq =
1308 				    create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq");
1309 
1310 		if (hpd_rx_offload_wq[i].wq == NULL) {
1311 			DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!");
1312 			return NULL;
1313 		}
1314 
1315 		mtx_init(&hpd_rx_offload_wq[i].offload_lock, IPL_TTY);
1316 	}
1317 
1318 	return hpd_rx_offload_wq;
1319 }
1320 
1321 struct amdgpu_stutter_quirk {
1322 	u16 chip_vendor;
1323 	u16 chip_device;
1324 	u16 subsys_vendor;
1325 	u16 subsys_device;
1326 	u8 revision;
1327 };
1328 
1329 static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = {
1330 	/* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */
1331 	{ 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 },
1332 	{ 0, 0, 0, 0, 0 },
1333 };
1334 
1335 static bool dm_should_disable_stutter(struct pci_dev *pdev)
1336 {
1337 	const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list;
1338 
1339 	while (p && p->chip_device != 0) {
1340 		if (pdev->vendor == p->chip_vendor &&
1341 		    pdev->device == p->chip_device &&
1342 		    pdev->subsystem_vendor == p->subsys_vendor &&
1343 		    pdev->subsystem_device == p->subsys_device &&
1344 		    pdev->revision == p->revision) {
1345 			return true;
1346 		}
1347 		++p;
1348 	}
1349 	return false;
1350 }
1351 
1352 static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
1353 	{
1354 		.matches = {
1355 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1356 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
1357 		},
1358 	},
1359 	{
1360 		.matches = {
1361 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1362 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
1363 		},
1364 	},
1365 	{
1366 		.matches = {
1367 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
1368 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
1369 		},
1370 	},
1371 	{}
1372 };
1373 
1374 static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
1375 {
1376 	const struct dmi_system_id *dmi_id;
1377 
1378 	dm->aux_hpd_discon_quirk = false;
1379 
1380 	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
1381 	if (dmi_id) {
1382 		dm->aux_hpd_discon_quirk = true;
1383 		DRM_INFO("aux_hpd_discon_quirk attached\n");
1384 	}
1385 }
1386 
1387 static int amdgpu_dm_init(struct amdgpu_device *adev)
1388 {
1389 	struct dc_init_data init_data;
1390 #ifdef CONFIG_DRM_AMD_DC_HDCP
1391 	struct dc_callback_init init_params;
1392 #endif
1393 	int r;
1394 
1395 	adev->dm.ddev = adev_to_drm(adev);
1396 	adev->dm.adev = adev;
1397 
1398 	/* Zero all the fields */
1399 	memset(&init_data, 0, sizeof(init_data));
1400 #ifdef CONFIG_DRM_AMD_DC_HDCP
1401 	memset(&init_params, 0, sizeof(init_params));
1402 #endif
1403 
1404 	rw_init(&adev->dm.dc_lock, "dmdc");
1405 	rw_init(&adev->dm.audio_lock, "dmaud");
1406 #if defined(CONFIG_DRM_AMD_DC_DCN)
1407 	mtx_init(&adev->dm.vblank_lock, IPL_TTY);
1408 #endif
1409 
1410 	if(amdgpu_dm_irq_init(adev)) {
1411 		DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n");
1412 		goto error;
1413 	}
1414 
1415 	init_data.asic_id.chip_family = adev->family;
1416 
1417 	init_data.asic_id.pci_revision_id = adev->pdev->revision;
1418 	init_data.asic_id.hw_internal_rev = adev->external_rev_id;
1419 	init_data.asic_id.chip_id = adev->pdev->device;
1420 
1421 	init_data.asic_id.vram_width = adev->gmc.vram_width;
1422 	/* TODO: initialize init_data.asic_id.vram_type here!!!! */
1423 	init_data.asic_id.atombios_base_address =
1424 		adev->mode_info.atom_context->bios;
1425 
1426 	init_data.driver = adev;
1427 
1428 	adev->dm.cgs_device = amdgpu_cgs_create_device(adev);
1429 
1430 	if (!adev->dm.cgs_device) {
1431 		DRM_ERROR("amdgpu: failed to create cgs device.\n");
1432 		goto error;
1433 	}
1434 
1435 	init_data.cgs_device = adev->dm.cgs_device;
1436 
1437 	init_data.dce_environment = DCE_ENV_PRODUCTION_DRV;
1438 
1439 	switch (adev->asic_type) {
1440 	case CHIP_CARRIZO:
1441 	case CHIP_STONEY:
1442 	case CHIP_RAVEN:
1443 	case CHIP_RENOIR:
1444 		init_data.flags.gpu_vm_support = true;
1445 		switch (adev->dm.dmcub_fw_version) {
1446 		case 0: /* development */
1447 		case 0x1: /* linux-firmware.git hash 6d9f399 */
1448 		case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */
1449 			init_data.flags.disable_dmcu = false;
1450 			break;
1451 		default:
1452 			init_data.flags.disable_dmcu = true;
1453 		}
1454 		break;
1455 	case CHIP_VANGOGH:
1456 	case CHIP_YELLOW_CARP:
1457 		init_data.flags.gpu_vm_support = true;
1458 		break;
1459 	default:
1460 		break;
1461 	}
1462 
1463 	if (amdgpu_dc_feature_mask & DC_FBC_MASK)
1464 		init_data.flags.fbc_support = true;
1465 
1466 	if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK)
1467 		init_data.flags.multi_mon_pp_mclk_switch = true;
1468 
1469 	if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK)
1470 		init_data.flags.disable_fractional_pwm = true;
1471 
1472 	if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING)
1473 		init_data.flags.edp_no_power_sequencing = true;
1474 
1475 	init_data.flags.power_down_display_on_boot = true;
1476 
1477 	INIT_LIST_HEAD(&adev->dm.da_list);
1478 
1479 	retrieve_dmi_info(&adev->dm);
1480 
1481 	/* Display Core create. */
1482 	adev->dm.dc = dc_create(&init_data);
1483 
1484 	if (adev->dm.dc) {
1485 		DRM_INFO("Display Core initialized with v%s!\n", DC_VER);
1486 	} else {
1487 		DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER);
1488 		goto error;
1489 	}
1490 
1491 	if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) {
1492 		adev->dm.dc->debug.force_single_disp_pipe_split = false;
1493 		adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID;
1494 	}
1495 
1496 	if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
1497 		adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
1498 	if (dm_should_disable_stutter(adev->pdev))
1499 		adev->dm.dc->debug.disable_stutter = true;
1500 
1501 	if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER)
1502 		adev->dm.dc->debug.disable_stutter = true;
1503 
1504 	if (amdgpu_dc_debug_mask & DC_DISABLE_DSC)
1505 		adev->dm.dc->debug.disable_dsc = true;
1506 
1507 	if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING)
1508 		adev->dm.dc->debug.disable_clock_gate = true;
1509 
1510 	r = dm_dmub_hw_init(adev);
1511 	if (r) {
1512 		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
1513 		goto error;
1514 	}
1515 
1516 	dc_hardware_init(adev->dm.dc);
1517 
1518 	adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc);
1519 	if (!adev->dm.hpd_rx_offload_wq) {
1520 		DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n");
1521 		goto error;
1522 	}
1523 
1524 #if defined(CONFIG_DRM_AMD_DC_DCN)
1525 	if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) {
1526 		struct dc_phy_addr_space_config pa_config;
1527 
1528 		mmhub_read_system_context(adev, &pa_config);
1529 
1530 		// Call the DC init_memory func
1531 		dc_setup_system_context(adev->dm.dc, &pa_config);
1532 	}
1533 #endif
1534 
1535 	adev->dm.freesync_module = mod_freesync_create(adev->dm.dc);
1536 	if (!adev->dm.freesync_module) {
1537 		DRM_ERROR(
1538 		"amdgpu: failed to initialize freesync_module.\n");
1539 	} else
1540 		DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n",
1541 				adev->dm.freesync_module);
1542 
1543 	amdgpu_dm_init_color_mod();
1544 
1545 #if defined(CONFIG_DRM_AMD_DC_DCN)
1546 	if (adev->dm.dc->caps.max_links > 0) {
1547 		adev->dm.vblank_control_workqueue =
1548 			create_singlethread_workqueue("dm_vblank_control_workqueue");
1549 		if (!adev->dm.vblank_control_workqueue)
1550 			DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n");
1551 	}
1552 #endif
1553 
1554 #ifdef CONFIG_DRM_AMD_DC_HDCP
1555 	if (adev->dm.dc->caps.max_links > 0 && adev->asic_type >= CHIP_RAVEN) {
1556 		adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
1557 
1558 		if (!adev->dm.hdcp_workqueue)
1559 			DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
1560 		else
1561 			DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue);
1562 
1563 		dc_init_callbacks(adev->dm.dc, &init_params);
1564 	}
1565 #endif
1566 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1567 	adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work();
1568 #endif
1569 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1570 		init_completion(&adev->dm.dmub_aux_transfer_done);
1571 		adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL);
1572 		if (!adev->dm.dmub_notify) {
1573 			DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify");
1574 			goto error;
1575 		}
1576 
1577 		adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq");
1578 		if (!adev->dm.delayed_hpd_wq) {
1579 			DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n");
1580 			goto error;
1581 		}
1582 
1583 		amdgpu_dm_outbox_init(adev);
1584 #if defined(CONFIG_DRM_AMD_DC_DCN)
1585 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY,
1586 			dmub_aux_setconfig_callback, false)) {
1587 			DRM_ERROR("amdgpu: fail to register dmub aux callback");
1588 			goto error;
1589 		}
1590 		if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) {
1591 			DRM_ERROR("amdgpu: fail to register dmub hpd callback");
1592 			goto error;
1593 		}
1594 #endif
1595 	}
1596 
1597 	if (amdgpu_dm_initialize_drm_device(adev)) {
1598 		DRM_ERROR(
1599 		"amdgpu: failed to initialize sw for display support.\n");
1600 		goto error;
1601 	}
1602 
1603 	/* create fake encoders for MST */
1604 	dm_dp_create_fake_mst_encoders(adev);
1605 
1606 	/* TODO: Add_display_info? */
1607 
1608 	/* TODO use dynamic cursor width */
1609 	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
1610 	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
1611 
1612 	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
1613 		DRM_ERROR(
1614 		"amdgpu: failed to initialize sw for display support.\n");
1615 		goto error;
1616 	}
1617 
1618 
1619 	DRM_DEBUG_DRIVER("KMS initialized.\n");
1620 
1621 	return 0;
1622 error:
1623 	amdgpu_dm_fini(adev);
1624 
1625 	return -EINVAL;
1626 }
1627 
1628 static int amdgpu_dm_early_fini(void *handle)
1629 {
1630 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1631 
1632 	amdgpu_dm_audio_fini(adev);
1633 
1634 	return 0;
1635 }
1636 
1637 static void amdgpu_dm_fini(struct amdgpu_device *adev)
1638 {
1639 	int i;
1640 
1641 #if defined(CONFIG_DRM_AMD_DC_DCN)
1642 	if (adev->dm.vblank_control_workqueue) {
1643 		destroy_workqueue(adev->dm.vblank_control_workqueue);
1644 		adev->dm.vblank_control_workqueue = NULL;
1645 	}
1646 #endif
1647 
1648 	for (i = 0; i < adev->dm.display_indexes_num; i++) {
1649 		drm_encoder_cleanup(&adev->dm.mst_encoders[i].base);
1650 	}
1651 
1652 	amdgpu_dm_destroy_drm_device(&adev->dm);
1653 
1654 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
1655 	if (adev->dm.crc_rd_wrk) {
1656 		flush_work(&adev->dm.crc_rd_wrk->notify_ta_work);
1657 		kfree(adev->dm.crc_rd_wrk);
1658 		adev->dm.crc_rd_wrk = NULL;
1659 	}
1660 #endif
1661 #ifdef CONFIG_DRM_AMD_DC_HDCP
1662 	if (adev->dm.hdcp_workqueue) {
1663 		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
1664 		adev->dm.hdcp_workqueue = NULL;
1665 	}
1666 
1667 	if (adev->dm.dc)
1668 		dc_deinit_callbacks(adev->dm.dc);
1669 #endif
1670 
1671 	dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
1672 
1673 	if (dc_enable_dmub_notifications(adev->dm.dc)) {
1674 		kfree(adev->dm.dmub_notify);
1675 		adev->dm.dmub_notify = NULL;
1676 		destroy_workqueue(adev->dm.delayed_hpd_wq);
1677 		adev->dm.delayed_hpd_wq = NULL;
1678 	}
1679 
1680 	if (adev->dm.dmub_bo)
1681 		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
1682 				      &adev->dm.dmub_bo_gpu_addr,
1683 				      &adev->dm.dmub_bo_cpu_addr);
1684 
1685 	/* DC Destroy TODO: Replace destroy DAL */
1686 	if (adev->dm.dc)
1687 		dc_destroy(&adev->dm.dc);
1688 	/*
1689 	 * TODO: pageflip, vlank interrupt
1690 	 *
1691 	 * amdgpu_dm_irq_fini(adev);
1692 	 */
1693 
1694 	if (adev->dm.cgs_device) {
1695 		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
1696 		adev->dm.cgs_device = NULL;
1697 	}
1698 	if (adev->dm.freesync_module) {
1699 		mod_freesync_destroy(adev->dm.freesync_module);
1700 		adev->dm.freesync_module = NULL;
1701 	}
1702 
1703 	if (adev->dm.hpd_rx_offload_wq) {
1704 		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
1705 			if (adev->dm.hpd_rx_offload_wq[i].wq) {
1706 				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
1707 				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
1708 			}
1709 		}
1710 
1711 		kfree(adev->dm.hpd_rx_offload_wq);
1712 		adev->dm.hpd_rx_offload_wq = NULL;
1713 	}
1714 
1715 	mutex_destroy(&adev->dm.audio_lock);
1716 	mutex_destroy(&adev->dm.dc_lock);
1717 
1718 	return;
1719 }
1720 
1721 static int load_dmcu_fw(struct amdgpu_device *adev)
1722 {
1723 	const char *fw_name_dmcu = NULL;
1724 	int r;
1725 	const struct dmcu_firmware_header_v1_0 *hdr;
1726 
1727 	switch(adev->asic_type) {
1728 #if defined(CONFIG_DRM_AMD_DC_SI)
1729 	case CHIP_TAHITI:
1730 	case CHIP_PITCAIRN:
1731 	case CHIP_VERDE:
1732 	case CHIP_OLAND:
1733 #endif
1734 	case CHIP_BONAIRE:
1735 	case CHIP_HAWAII:
1736 	case CHIP_KAVERI:
1737 	case CHIP_KABINI:
1738 	case CHIP_MULLINS:
1739 	case CHIP_TONGA:
1740 	case CHIP_FIJI:
1741 	case CHIP_CARRIZO:
1742 	case CHIP_STONEY:
1743 	case CHIP_POLARIS11:
1744 	case CHIP_POLARIS10:
1745 	case CHIP_POLARIS12:
1746 	case CHIP_VEGAM:
1747 	case CHIP_VEGA10:
1748 	case CHIP_VEGA12:
1749 	case CHIP_VEGA20:
1750 	case CHIP_NAVI10:
1751 	case CHIP_NAVI14:
1752 	case CHIP_RENOIR:
1753 	case CHIP_SIENNA_CICHLID:
1754 	case CHIP_NAVY_FLOUNDER:
1755 	case CHIP_DIMGREY_CAVEFISH:
1756 	case CHIP_BEIGE_GOBY:
1757 	case CHIP_VANGOGH:
1758 	case CHIP_YELLOW_CARP:
1759 		return 0;
1760 	case CHIP_NAVI12:
1761 		fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
1762 		break;
1763 	case CHIP_RAVEN:
1764 		if (ASICREV_IS_PICASSO(adev->external_rev_id))
1765 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1766 		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
1767 			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
1768 		else
1769 			return 0;
1770 		break;
1771 	default:
1772 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
1773 		return -EINVAL;
1774 	}
1775 
1776 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1777 		DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n");
1778 		return 0;
1779 	}
1780 
1781 	r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev);
1782 	if (r == -ENOENT) {
1783 		/* DMCU firmware is not necessary, so don't raise a fuss if it's missing */
1784 		DRM_DEBUG_KMS("dm: DMCU firmware not found\n");
1785 		adev->dm.fw_dmcu = NULL;
1786 		return 0;
1787 	}
1788 	if (r) {
1789 		dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n",
1790 			fw_name_dmcu);
1791 		return r;
1792 	}
1793 
1794 	r = amdgpu_ucode_validate(adev->dm.fw_dmcu);
1795 	if (r) {
1796 		dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n",
1797 			fw_name_dmcu);
1798 		release_firmware(adev->dm.fw_dmcu);
1799 		adev->dm.fw_dmcu = NULL;
1800 		return r;
1801 	}
1802 
1803 	hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data;
1804 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM;
1805 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu;
1806 	adev->firmware.fw_size +=
1807 		roundup2(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1808 
1809 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV;
1810 	adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu;
1811 	adev->firmware.fw_size +=
1812 		roundup2(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE);
1813 
1814 	adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version);
1815 
1816 	DRM_DEBUG_KMS("PSP loading DMCU firmware\n");
1817 
1818 	return 0;
1819 }
1820 
1821 static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address)
1822 {
1823 	struct amdgpu_device *adev = ctx;
1824 
1825 	return dm_read_reg(adev->dm.dc->ctx, address);
1826 }
1827 
1828 static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address,
1829 				     uint32_t value)
1830 {
1831 	struct amdgpu_device *adev = ctx;
1832 
1833 	return dm_write_reg(adev->dm.dc->ctx, address, value);
1834 }
1835 
1836 static int dm_dmub_sw_init(struct amdgpu_device *adev)
1837 {
1838 	struct dmub_srv_create_params create_params;
1839 	struct dmub_srv_region_params region_params;
1840 	struct dmub_srv_region_info region_info;
1841 	struct dmub_srv_fb_params fb_params;
1842 	struct dmub_srv_fb_info *fb_info;
1843 	struct dmub_srv *dmub_srv;
1844 	const struct dmcub_firmware_header_v1_0 *hdr;
1845 	const char *fw_name_dmub;
1846 	enum dmub_asic dmub_asic;
1847 	enum dmub_status status;
1848 	int r;
1849 
1850 	switch (adev->asic_type) {
1851 	case CHIP_RENOIR:
1852 		dmub_asic = DMUB_ASIC_DCN21;
1853 		fw_name_dmub = FIRMWARE_RENOIR_DMUB;
1854 		if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id))
1855 			fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB;
1856 		break;
1857 	case CHIP_SIENNA_CICHLID:
1858 		dmub_asic = DMUB_ASIC_DCN30;
1859 		fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB;
1860 		break;
1861 	case CHIP_NAVY_FLOUNDER:
1862 		dmub_asic = DMUB_ASIC_DCN30;
1863 		fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB;
1864 		break;
1865 	case CHIP_VANGOGH:
1866 		dmub_asic = DMUB_ASIC_DCN301;
1867 		fw_name_dmub = FIRMWARE_VANGOGH_DMUB;
1868 		break;
1869 	case CHIP_DIMGREY_CAVEFISH:
1870 		dmub_asic = DMUB_ASIC_DCN302;
1871 		fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB;
1872 		break;
1873 	case CHIP_BEIGE_GOBY:
1874 		dmub_asic = DMUB_ASIC_DCN303;
1875 		fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB;
1876 		break;
1877 	case CHIP_YELLOW_CARP:
1878 		dmub_asic = DMUB_ASIC_DCN31;
1879 		fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB;
1880 		break;
1881 
1882 	default:
1883 		/* ASIC doesn't support DMUB. */
1884 		return 0;
1885 	}
1886 
1887 	r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev);
1888 	if (r) {
1889 		DRM_ERROR("DMUB firmware loading failed: %d\n", r);
1890 		return 0;
1891 	}
1892 
1893 	r = amdgpu_ucode_validate(adev->dm.dmub_fw);
1894 	if (r) {
1895 		DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r);
1896 		return 0;
1897 	}
1898 
1899 	hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
1900 	adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
1901 
1902 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
1903 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
1904 			AMDGPU_UCODE_ID_DMCUB;
1905 		adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
1906 			adev->dm.dmub_fw;
1907 		adev->firmware.fw_size +=
1908 			roundup2(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
1909 
1910 		DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
1911 			 adev->dm.dmcub_fw_version);
1912 	}
1913 
1914 
1915 	adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
1916 	dmub_srv = adev->dm.dmub_srv;
1917 
1918 	if (!dmub_srv) {
1919 		DRM_ERROR("Failed to allocate DMUB service!\n");
1920 		return -ENOMEM;
1921 	}
1922 
1923 	memset(&create_params, 0, sizeof(create_params));
1924 	create_params.user_ctx = adev;
1925 	create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read;
1926 	create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write;
1927 	create_params.asic = dmub_asic;
1928 
1929 	/* Create the DMUB service. */
1930 	status = dmub_srv_create(dmub_srv, &create_params);
1931 	if (status != DMUB_STATUS_OK) {
1932 		DRM_ERROR("Error creating DMUB service: %d\n", status);
1933 		return -EINVAL;
1934 	}
1935 
1936 	/* Calculate the size of all the regions for the DMUB service. */
1937 	memset(&region_params, 0, sizeof(region_params));
1938 
1939 	region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) -
1940 					PSP_HEADER_BYTES - PSP_FOOTER_BYTES;
1941 	region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
1942 	region_params.vbios_size = adev->bios_size;
1943 	region_params.fw_bss_data = region_params.bss_data_size ?
1944 		adev->dm.dmub_fw->data +
1945 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1946 		le32_to_cpu(hdr->inst_const_bytes) : NULL;
1947 	region_params.fw_inst_const =
1948 		adev->dm.dmub_fw->data +
1949 		le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
1950 		PSP_HEADER_BYTES;
1951 
1952 	status = dmub_srv_calc_region_info(dmub_srv, &region_params,
1953 					   &region_info);
1954 
1955 	if (status != DMUB_STATUS_OK) {
1956 		DRM_ERROR("Error calculating DMUB region info: %d\n", status);
1957 		return -EINVAL;
1958 	}
1959 
1960 	/*
1961 	 * Allocate a framebuffer based on the total size of all the regions.
1962 	 * TODO: Move this into GART.
1963 	 */
1964 	r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE,
1965 				    AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo,
1966 				    &adev->dm.dmub_bo_gpu_addr,
1967 				    &adev->dm.dmub_bo_cpu_addr);
1968 	if (r)
1969 		return r;
1970 
1971 	/* Rebase the regions on the framebuffer address. */
1972 	memset(&fb_params, 0, sizeof(fb_params));
1973 	fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
1974 	fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
1975 	fb_params.region_info = &region_info;
1976 
1977 	adev->dm.dmub_fb_info =
1978 		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
1979 	fb_info = adev->dm.dmub_fb_info;
1980 
1981 	if (!fb_info) {
1982 		DRM_ERROR(
1983 			"Failed to allocate framebuffer info for DMUB service!\n");
1984 		return -ENOMEM;
1985 	}
1986 
1987 	status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
1988 	if (status != DMUB_STATUS_OK) {
1989 		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
1990 		return -EINVAL;
1991 	}
1992 
1993 	return 0;
1994 }
1995 
1996 static int dm_sw_init(void *handle)
1997 {
1998 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
1999 	int r;
2000 
2001 	r = dm_dmub_sw_init(adev);
2002 	if (r)
2003 		return r;
2004 
2005 	return load_dmcu_fw(adev);
2006 }
2007 
2008 static int dm_sw_fini(void *handle)
2009 {
2010 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2011 
2012 	kfree(adev->dm.dmub_fb_info);
2013 	adev->dm.dmub_fb_info = NULL;
2014 
2015 	if (adev->dm.dmub_srv) {
2016 		dmub_srv_destroy(adev->dm.dmub_srv);
2017 		adev->dm.dmub_srv = NULL;
2018 	}
2019 
2020 	release_firmware(adev->dm.dmub_fw);
2021 	adev->dm.dmub_fw = NULL;
2022 
2023 	release_firmware(adev->dm.fw_dmcu);
2024 	adev->dm.fw_dmcu = NULL;
2025 
2026 	return 0;
2027 }
2028 
2029 static int detect_mst_link_for_all_connectors(struct drm_device *dev)
2030 {
2031 	struct amdgpu_dm_connector *aconnector;
2032 	struct drm_connector *connector;
2033 	struct drm_connector_list_iter iter;
2034 	int ret = 0;
2035 
2036 	drm_connector_list_iter_begin(dev, &iter);
2037 	drm_for_each_connector_iter(connector, &iter) {
2038 		aconnector = to_amdgpu_dm_connector(connector);
2039 		if (aconnector->dc_link->type == dc_connection_mst_branch &&
2040 		    aconnector->mst_mgr.aux) {
2041 			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
2042 					 aconnector,
2043 					 aconnector->base.base.id);
2044 
2045 			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
2046 			if (ret < 0) {
2047 				DRM_ERROR("DM_MST: Failed to start MST\n");
2048 				aconnector->dc_link->type =
2049 					dc_connection_single;
2050 				break;
2051 			}
2052 		}
2053 	}
2054 	drm_connector_list_iter_end(&iter);
2055 
2056 	return ret;
2057 }
2058 
2059 static int dm_late_init(void *handle)
2060 {
2061 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2062 
2063 	struct dmcu_iram_parameters params;
2064 	unsigned int linear_lut[16];
2065 	int i;
2066 	struct dmcu *dmcu = NULL;
2067 
2068 	dmcu = adev->dm.dc->res_pool->dmcu;
2069 
2070 	for (i = 0; i < 16; i++)
2071 		linear_lut[i] = 0xFFFF * i / 15;
2072 
2073 	params.set = 0;
2074 	params.backlight_ramping_override = false;
2075 	params.backlight_ramping_start = 0xCCCC;
2076 	params.backlight_ramping_reduction = 0xCCCCCCCC;
2077 	params.backlight_lut_array_size = 16;
2078 	params.backlight_lut_array = linear_lut;
2079 
2080 	/* Min backlight level after ABM reduction,  Don't allow below 1%
2081 	 * 0xFFFF x 0.01 = 0x28F
2082 	 */
2083 	params.min_abm_backlight = 0x28F;
2084 	/* In the case where abm is implemented on dmcub,
2085 	* dmcu object will be null.
2086 	* ABM 2.4 and up are implemented on dmcub.
2087 	*/
2088 	if (dmcu) {
2089 		if (!dmcu_load_iram(dmcu, params))
2090 			return -EINVAL;
2091 	} else if (adev->dm.dc->ctx->dmub_srv) {
2092 		struct dc_link *edp_links[MAX_NUM_EDP];
2093 		int edp_num;
2094 
2095 		get_edp_links(adev->dm.dc, edp_links, &edp_num);
2096 		for (i = 0; i < edp_num; i++) {
2097 			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
2098 				return -EINVAL;
2099 		}
2100 	}
2101 
2102 	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
2103 }
2104 
2105 static void s3_handle_mst(struct drm_device *dev, bool suspend)
2106 {
2107 	struct amdgpu_dm_connector *aconnector;
2108 	struct drm_connector *connector;
2109 	struct drm_connector_list_iter iter;
2110 	struct drm_dp_mst_topology_mgr *mgr;
2111 	int ret;
2112 	bool need_hotplug = false;
2113 
2114 	drm_connector_list_iter_begin(dev, &iter);
2115 	drm_for_each_connector_iter(connector, &iter) {
2116 		aconnector = to_amdgpu_dm_connector(connector);
2117 		if (aconnector->dc_link->type != dc_connection_mst_branch ||
2118 		    aconnector->mst_port)
2119 			continue;
2120 
2121 		mgr = &aconnector->mst_mgr;
2122 
2123 		if (suspend) {
2124 			drm_dp_mst_topology_mgr_suspend(mgr);
2125 		} else {
2126 			ret = drm_dp_mst_topology_mgr_resume(mgr, true);
2127 			if (ret < 0) {
2128 				drm_dp_mst_topology_mgr_set_mst(mgr, false);
2129 				need_hotplug = true;
2130 			}
2131 		}
2132 	}
2133 	drm_connector_list_iter_end(&iter);
2134 
2135 	if (need_hotplug)
2136 		drm_kms_helper_hotplug_event(dev);
2137 }
2138 
2139 static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
2140 {
2141 	struct smu_context *smu = &adev->smu;
2142 	int ret = 0;
2143 
2144 	if (!is_support_sw_smu(adev))
2145 		return 0;
2146 
2147 	/* This interface is for dGPU Navi1x.Linux dc-pplib interface depends
2148 	 * on window driver dc implementation.
2149 	 * For Navi1x, clock settings of dcn watermarks are fixed. the settings
2150 	 * should be passed to smu during boot up and resume from s3.
2151 	 * boot up: dc calculate dcn watermark clock settings within dc_create,
2152 	 * dcn20_resource_construct
2153 	 * then call pplib functions below to pass the settings to smu:
2154 	 * smu_set_watermarks_for_clock_ranges
2155 	 * smu_set_watermarks_table
2156 	 * navi10_set_watermarks_table
2157 	 * smu_write_watermarks_table
2158 	 *
2159 	 * For Renoir, clock settings of dcn watermark are also fixed values.
2160 	 * dc has implemented different flow for window driver:
2161 	 * dc_hardware_init / dc_set_power_state
2162 	 * dcn10_init_hw
2163 	 * notify_wm_ranges
2164 	 * set_wm_ranges
2165 	 * -- Linux
2166 	 * smu_set_watermarks_for_clock_ranges
2167 	 * renoir_set_watermarks_table
2168 	 * smu_write_watermarks_table
2169 	 *
2170 	 * For Linux,
2171 	 * dc_hardware_init -> amdgpu_dm_init
2172 	 * dc_set_power_state --> dm_resume
2173 	 *
2174 	 * therefore, this function apply to navi10/12/14 but not Renoir
2175 	 * *
2176 	 */
2177 	switch(adev->asic_type) {
2178 	case CHIP_NAVI10:
2179 	case CHIP_NAVI14:
2180 	case CHIP_NAVI12:
2181 		break;
2182 	default:
2183 		return 0;
2184 	}
2185 
2186 	ret = smu_write_watermarks_table(smu);
2187 	if (ret) {
2188 		DRM_ERROR("Failed to update WMTABLE!\n");
2189 		return ret;
2190 	}
2191 
2192 	return 0;
2193 }
2194 
2195 /**
2196  * dm_hw_init() - Initialize DC device
2197  * @handle: The base driver device containing the amdgpu_dm device.
2198  *
2199  * Initialize the &struct amdgpu_display_manager device. This involves calling
2200  * the initializers of each DM component, then populating the struct with them.
2201  *
2202  * Although the function implies hardware initialization, both hardware and
2203  * software are initialized here. Splitting them out to their relevant init
2204  * hooks is a future TODO item.
2205  *
2206  * Some notable things that are initialized here:
2207  *
2208  * - Display Core, both software and hardware
2209  * - DC modules that we need (freesync and color management)
2210  * - DRM software states
2211  * - Interrupt sources and handlers
2212  * - Vblank support
2213  * - Debug FS entries, if enabled
2214  */
2215 static int dm_hw_init(void *handle)
2216 {
2217 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2218 	/* Create DAL display manager */
2219 	amdgpu_dm_init(adev);
2220 	amdgpu_dm_hpd_init(adev);
2221 
2222 	return 0;
2223 }
2224 
2225 /**
2226  * dm_hw_fini() - Teardown DC device
2227  * @handle: The base driver device containing the amdgpu_dm device.
2228  *
2229  * Teardown components within &struct amdgpu_display_manager that require
2230  * cleanup. This involves cleaning up the DRM device, DC, and any modules that
2231  * were loaded. Also flush IRQ workqueues and disable them.
2232  */
2233 static int dm_hw_fini(void *handle)
2234 {
2235 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2236 
2237 	amdgpu_dm_hpd_fini(adev);
2238 
2239 	amdgpu_dm_irq_fini(adev);
2240 	amdgpu_dm_fini(adev);
2241 	return 0;
2242 }
2243 
2244 
2245 static int dm_enable_vblank(struct drm_crtc *crtc);
2246 static void dm_disable_vblank(struct drm_crtc *crtc);
2247 
2248 static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
2249 				 struct dc_state *state, bool enable)
2250 {
2251 	enum dc_irq_source irq_source;
2252 	struct amdgpu_crtc *acrtc;
2253 	int rc = -EBUSY;
2254 	int i = 0;
2255 
2256 	for (i = 0; i < state->stream_count; i++) {
2257 		acrtc = get_crtc_by_otg_inst(
2258 				adev, state->stream_status[i].primary_otg_inst);
2259 
2260 		if (acrtc && state->stream_status[i].plane_count != 0) {
2261 			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
2262 			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
2263 			DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
2264 				      acrtc->crtc_id, enable ? "en" : "dis", rc);
2265 			if (rc)
2266 				DRM_WARN("Failed to %s pflip interrupts\n",
2267 					 enable ? "enable" : "disable");
2268 
2269 			if (enable) {
2270 				rc = dm_enable_vblank(&acrtc->base);
2271 				if (rc)
2272 					DRM_WARN("Failed to enable vblank interrupts\n");
2273 			} else {
2274 				dm_disable_vblank(&acrtc->base);
2275 			}
2276 
2277 		}
2278 	}
2279 
2280 }
2281 
2282 static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc)
2283 {
2284 	struct dc_state *context = NULL;
2285 	enum dc_status res = DC_ERROR_UNEXPECTED;
2286 	int i;
2287 	struct dc_stream_state *del_streams[MAX_PIPES];
2288 	int del_streams_count = 0;
2289 
2290 	memset(del_streams, 0, sizeof(del_streams));
2291 
2292 	context = dc_create_state(dc);
2293 	if (context == NULL)
2294 		goto context_alloc_fail;
2295 
2296 	dc_resource_state_copy_construct_current(dc, context);
2297 
2298 	/* First remove from context all streams */
2299 	for (i = 0; i < context->stream_count; i++) {
2300 		struct dc_stream_state *stream = context->streams[i];
2301 
2302 		del_streams[del_streams_count++] = stream;
2303 	}
2304 
2305 	/* Remove all planes for removed streams and then remove the streams */
2306 	for (i = 0; i < del_streams_count; i++) {
2307 		if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) {
2308 			res = DC_FAIL_DETACH_SURFACES;
2309 			goto fail;
2310 		}
2311 
2312 		res = dc_remove_stream_from_ctx(dc, context, del_streams[i]);
2313 		if (res != DC_OK)
2314 			goto fail;
2315 	}
2316 
2317 
2318 	res = dc_validate_global_state(dc, context, false);
2319 
2320 	if (res != DC_OK) {
2321 		DRM_ERROR("%s:resource validation failed, dc_status:%d\n", __func__, res);
2322 		goto fail;
2323 	}
2324 
2325 	res = dc_commit_state(dc, context);
2326 
2327 fail:
2328 	dc_release_state(context);
2329 
2330 context_alloc_fail:
2331 	return res;
2332 }
2333 
2334 static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm)
2335 {
2336 	int i;
2337 
2338 	if (dm->hpd_rx_offload_wq) {
2339 		for (i = 0; i < dm->dc->caps.max_links; i++)
2340 			flush_workqueue(dm->hpd_rx_offload_wq[i].wq);
2341 	}
2342 }
2343 
2344 static int dm_suspend(void *handle)
2345 {
2346 	struct amdgpu_device *adev = handle;
2347 	struct amdgpu_display_manager *dm = &adev->dm;
2348 	int ret = 0;
2349 
2350 	if (amdgpu_in_reset(adev)) {
2351 		mutex_lock(&dm->dc_lock);
2352 
2353 #if defined(CONFIG_DRM_AMD_DC_DCN)
2354 		dc_allow_idle_optimizations(adev->dm.dc, false);
2355 #endif
2356 
2357 		dm->cached_dc_state = dc_copy_state(dm->dc->current_state);
2358 
2359 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
2360 
2361 		amdgpu_dm_commit_zero_streams(dm->dc);
2362 
2363 		amdgpu_dm_irq_suspend(adev);
2364 
2365 		hpd_rx_irq_work_suspend(dm);
2366 
2367 		return ret;
2368 	}
2369 
2370 	WARN_ON(adev->dm.cached_state);
2371 	adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev));
2372 
2373 	s3_handle_mst(adev_to_drm(adev), true);
2374 
2375 	amdgpu_dm_irq_suspend(adev);
2376 
2377 	hpd_rx_irq_work_suspend(dm);
2378 
2379 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3);
2380 
2381 	return 0;
2382 }
2383 
2384 static struct amdgpu_dm_connector *
2385 amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
2386 					     struct drm_crtc *crtc)
2387 {
2388 	uint32_t i;
2389 	struct drm_connector_state *new_con_state;
2390 	struct drm_connector *connector;
2391 	struct drm_crtc *crtc_from_state;
2392 
2393 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
2394 		crtc_from_state = new_con_state->crtc;
2395 
2396 		if (crtc_from_state == crtc)
2397 			return to_amdgpu_dm_connector(connector);
2398 	}
2399 
2400 	return NULL;
2401 }
2402 
2403 static void emulated_link_detect(struct dc_link *link)
2404 {
2405 	struct dc_sink_init_data sink_init_data = { 0 };
2406 	struct display_sink_capability sink_caps = { 0 };
2407 	enum dc_edid_status edid_status;
2408 	struct dc_context *dc_ctx = link->ctx;
2409 	struct dc_sink *sink = NULL;
2410 	struct dc_sink *prev_sink = NULL;
2411 
2412 	link->type = dc_connection_none;
2413 	prev_sink = link->local_sink;
2414 
2415 	if (prev_sink)
2416 		dc_sink_release(prev_sink);
2417 
2418 	switch (link->connector_signal) {
2419 	case SIGNAL_TYPE_HDMI_TYPE_A: {
2420 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2421 		sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
2422 		break;
2423 	}
2424 
2425 	case SIGNAL_TYPE_DVI_SINGLE_LINK: {
2426 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2427 		sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
2428 		break;
2429 	}
2430 
2431 	case SIGNAL_TYPE_DVI_DUAL_LINK: {
2432 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2433 		sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
2434 		break;
2435 	}
2436 
2437 	case SIGNAL_TYPE_LVDS: {
2438 		sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
2439 		sink_caps.signal = SIGNAL_TYPE_LVDS;
2440 		break;
2441 	}
2442 
2443 	case SIGNAL_TYPE_EDP: {
2444 		sink_caps.transaction_type =
2445 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2446 		sink_caps.signal = SIGNAL_TYPE_EDP;
2447 		break;
2448 	}
2449 
2450 	case SIGNAL_TYPE_DISPLAY_PORT: {
2451 		sink_caps.transaction_type =
2452 			DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
2453 		sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
2454 		break;
2455 	}
2456 
2457 	default:
2458 		DC_ERROR("Invalid connector type! signal:%d\n",
2459 			link->connector_signal);
2460 		return;
2461 	}
2462 
2463 	sink_init_data.link = link;
2464 	sink_init_data.sink_signal = sink_caps.signal;
2465 
2466 	sink = dc_sink_create(&sink_init_data);
2467 	if (!sink) {
2468 		DC_ERROR("Failed to create sink!\n");
2469 		return;
2470 	}
2471 
2472 	/* dc_sink_create returns a new reference */
2473 	link->local_sink = sink;
2474 
2475 	edid_status = dm_helpers_read_local_edid(
2476 			link->ctx,
2477 			link,
2478 			sink);
2479 
2480 	if (edid_status != EDID_OK)
2481 		DC_ERROR("Failed to read EDID");
2482 
2483 }
2484 
2485 static void dm_gpureset_commit_state(struct dc_state *dc_state,
2486 				     struct amdgpu_display_manager *dm)
2487 {
2488 	struct {
2489 		struct dc_surface_update surface_updates[MAX_SURFACES];
2490 		struct dc_plane_info plane_infos[MAX_SURFACES];
2491 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
2492 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
2493 		struct dc_stream_update stream_update;
2494 	} * bundle;
2495 	int k, m;
2496 
2497 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
2498 
2499 	if (!bundle) {
2500 		dm_error("Failed to allocate update bundle\n");
2501 		goto cleanup;
2502 	}
2503 
2504 	for (k = 0; k < dc_state->stream_count; k++) {
2505 		bundle->stream_update.stream = dc_state->streams[k];
2506 
2507 		for (m = 0; m < dc_state->stream_status->plane_count; m++) {
2508 			bundle->surface_updates[m].surface =
2509 				dc_state->stream_status->plane_states[m];
2510 			bundle->surface_updates[m].surface->force_full_update =
2511 				true;
2512 		}
2513 		dc_commit_updates_for_stream(
2514 			dm->dc, bundle->surface_updates,
2515 			dc_state->stream_status->plane_count,
2516 			dc_state->streams[k], &bundle->stream_update, dc_state);
2517 	}
2518 
2519 cleanup:
2520 	kfree(bundle);
2521 
2522 	return;
2523 }
2524 
2525 static void dm_set_dpms_off(struct dc_link *link)
2526 {
2527 	struct dc_stream_state *stream_state;
2528 	struct amdgpu_dm_connector *aconnector = link->priv;
2529 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
2530 	struct dc_stream_update stream_update;
2531 	bool dpms_off = true;
2532 
2533 	memset(&stream_update, 0, sizeof(stream_update));
2534 	stream_update.dpms_off = &dpms_off;
2535 
2536 	mutex_lock(&adev->dm.dc_lock);
2537 	stream_state = dc_stream_find_from_link(link);
2538 
2539 	if (stream_state == NULL) {
2540 		DRM_DEBUG_DRIVER("Error finding stream state associated with link!\n");
2541 		mutex_unlock(&adev->dm.dc_lock);
2542 		return;
2543 	}
2544 
2545 	stream_update.stream = stream_state;
2546 	dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0,
2547 				     stream_state, &stream_update,
2548 				     stream_state->ctx->dc->current_state);
2549 	mutex_unlock(&adev->dm.dc_lock);
2550 }
2551 
2552 static int dm_resume(void *handle)
2553 {
2554 	struct amdgpu_device *adev = handle;
2555 	struct drm_device *ddev = adev_to_drm(adev);
2556 	struct amdgpu_display_manager *dm = &adev->dm;
2557 	struct amdgpu_dm_connector *aconnector;
2558 	struct drm_connector *connector;
2559 	struct drm_connector_list_iter iter;
2560 	struct drm_crtc *crtc;
2561 	struct drm_crtc_state *new_crtc_state;
2562 	struct dm_crtc_state *dm_new_crtc_state;
2563 	struct drm_plane *plane;
2564 	struct drm_plane_state *new_plane_state;
2565 	struct dm_plane_state *dm_new_plane_state;
2566 	struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state);
2567 	enum dc_connection_type new_connection_type = dc_connection_none;
2568 	struct dc_state *dc_state;
2569 	int i, r, j;
2570 
2571 	if (amdgpu_in_reset(adev)) {
2572 		dc_state = dm->cached_dc_state;
2573 
2574 		if (dc_enable_dmub_notifications(adev->dm.dc))
2575 			amdgpu_dm_outbox_init(adev);
2576 
2577 		r = dm_dmub_hw_init(adev);
2578 		if (r)
2579 			DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
2580 
2581 		dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2582 		dc_resume(dm->dc);
2583 
2584 		amdgpu_dm_irq_resume_early(adev);
2585 
2586 		for (i = 0; i < dc_state->stream_count; i++) {
2587 			dc_state->streams[i]->mode_changed = true;
2588 			for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
2589 				dc_state->stream_status[i].plane_states[j]->update_flags.raw
2590 					= 0xffffffff;
2591 			}
2592 		}
2593 #if defined(CONFIG_DRM_AMD_DC_DCN)
2594 		/*
2595 		 * Resource allocation happens for link encoders for newer ASIC in
2596 		 * dc_validate_global_state, so we need to revalidate it.
2597 		 *
2598 		 * This shouldn't fail (it passed once before), so warn if it does.
2599 		 */
2600 		WARN_ON(dc_validate_global_state(dm->dc, dc_state, false) != DC_OK);
2601 #endif
2602 
2603 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
2604 
2605 		dm_gpureset_commit_state(dm->cached_dc_state, dm);
2606 
2607 		dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
2608 
2609 		dc_release_state(dm->cached_dc_state);
2610 		dm->cached_dc_state = NULL;
2611 
2612 		amdgpu_dm_irq_resume_late(adev);
2613 
2614 		mutex_unlock(&dm->dc_lock);
2615 
2616 		return 0;
2617 	}
2618 	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
2619 	dc_release_state(dm_state->context);
2620 	dm_state->context = dc_create_state(dm->dc);
2621 	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
2622 	dc_resource_state_construct(dm->dc, dm_state->context);
2623 
2624 	/* Re-enable outbox interrupts for DPIA. */
2625 	if (dc_enable_dmub_notifications(adev->dm.dc))
2626 		amdgpu_dm_outbox_init(adev);
2627 
2628 	/* Before powering on DC we need to re-initialize DMUB. */
2629 	dm_dmub_hw_resume(adev);
2630 
2631 	/* power on hardware */
2632 	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
2633 
2634 	/* program HPD filter */
2635 	dc_resume(dm->dc);
2636 
2637 	/*
2638 	 * early enable HPD Rx IRQ, should be done before set mode as short
2639 	 * pulse interrupts are used for MST
2640 	 */
2641 	amdgpu_dm_irq_resume_early(adev);
2642 
2643 	/* On resume we need to rewrite the MSTM control bits to enable MST*/
2644 	s3_handle_mst(ddev, false);
2645 
2646 	/* Do detection*/
2647 	drm_connector_list_iter_begin(ddev, &iter);
2648 	drm_for_each_connector_iter(connector, &iter) {
2649 		aconnector = to_amdgpu_dm_connector(connector);
2650 
2651 		/*
2652 		 * this is the case when traversing through already created
2653 		 * MST connectors, should be skipped
2654 		 */
2655 		if (aconnector->dc_link &&
2656 		    aconnector->dc_link->type == dc_connection_mst_branch)
2657 			continue;
2658 
2659 		mutex_lock(&aconnector->hpd_lock);
2660 		if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
2661 			DRM_ERROR("KMS: Failed to detect connector\n");
2662 
2663 		if (aconnector->base.force && new_connection_type == dc_connection_none)
2664 			emulated_link_detect(aconnector->dc_link);
2665 		else
2666 			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
2667 
2668 		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
2669 			aconnector->fake_enable = false;
2670 
2671 		if (aconnector->dc_sink)
2672 			dc_sink_release(aconnector->dc_sink);
2673 		aconnector->dc_sink = NULL;
2674 		amdgpu_dm_update_connector_after_detect(aconnector);
2675 		mutex_unlock(&aconnector->hpd_lock);
2676 	}
2677 	drm_connector_list_iter_end(&iter);
2678 
2679 	/* Force mode set in atomic commit */
2680 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
2681 		new_crtc_state->active_changed = true;
2682 
2683 	/*
2684 	 * atomic_check is expected to create the dc states. We need to release
2685 	 * them here, since they were duplicated as part of the suspend
2686 	 * procedure.
2687 	 */
2688 	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
2689 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2690 		if (dm_new_crtc_state->stream) {
2691 			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
2692 			dc_stream_release(dm_new_crtc_state->stream);
2693 			dm_new_crtc_state->stream = NULL;
2694 		}
2695 	}
2696 
2697 	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
2698 		dm_new_plane_state = to_dm_plane_state(new_plane_state);
2699 		if (dm_new_plane_state->dc_state) {
2700 			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
2701 			dc_plane_state_release(dm_new_plane_state->dc_state);
2702 			dm_new_plane_state->dc_state = NULL;
2703 		}
2704 	}
2705 
2706 	drm_atomic_helper_resume(ddev, dm->cached_state);
2707 
2708 	dm->cached_state = NULL;
2709 
2710 	amdgpu_dm_irq_resume_late(adev);
2711 
2712 	amdgpu_dm_smu_write_watermarks_table(adev);
2713 
2714 	return 0;
2715 }
2716 
2717 /**
2718  * DOC: DM Lifecycle
2719  *
2720  * DM (and consequently DC) is registered in the amdgpu base driver as a IP
2721  * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
2722  * the base driver's device list to be initialized and torn down accordingly.
2723  *
2724  * The functions to do so are provided as hooks in &struct amd_ip_funcs.
2725  */
2726 
2727 static const struct amd_ip_funcs amdgpu_dm_funcs = {
2728 	.name = "dm",
2729 	.early_init = dm_early_init,
2730 	.late_init = dm_late_init,
2731 	.sw_init = dm_sw_init,
2732 	.sw_fini = dm_sw_fini,
2733 	.early_fini = amdgpu_dm_early_fini,
2734 	.hw_init = dm_hw_init,
2735 	.hw_fini = dm_hw_fini,
2736 	.suspend = dm_suspend,
2737 	.resume = dm_resume,
2738 	.is_idle = dm_is_idle,
2739 	.wait_for_idle = dm_wait_for_idle,
2740 	.check_soft_reset = dm_check_soft_reset,
2741 	.soft_reset = dm_soft_reset,
2742 	.set_clockgating_state = dm_set_clockgating_state,
2743 	.set_powergating_state = dm_set_powergating_state,
2744 };
2745 
2746 const struct amdgpu_ip_block_version dm_ip_block =
2747 {
2748 	.type = AMD_IP_BLOCK_TYPE_DCE,
2749 	.major = 1,
2750 	.minor = 0,
2751 	.rev = 0,
2752 	.funcs = &amdgpu_dm_funcs,
2753 };
2754 
2755 
2756 /**
2757  * DOC: atomic
2758  *
2759  * *WIP*
2760  */
2761 
2762 static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
2763 	.fb_create = amdgpu_display_user_framebuffer_create,
2764 	.get_format_info = amd_get_format_info,
2765 	.output_poll_changed = drm_fb_helper_output_poll_changed,
2766 	.atomic_check = amdgpu_dm_atomic_check,
2767 	.atomic_commit = drm_atomic_helper_commit,
2768 };
2769 
2770 static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
2771 	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
2772 };
2773 
2774 static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
2775 {
2776 	u32 max_avg, min_cll, max, min, q, r;
2777 	struct amdgpu_dm_backlight_caps *caps;
2778 	struct amdgpu_display_manager *dm;
2779 	struct drm_connector *conn_base;
2780 	struct amdgpu_device *adev;
2781 	struct dc_link *link = NULL;
2782 	static const u8 pre_computed_values[] = {
2783 		50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
2784 		71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
2785 	int i;
2786 
2787 	if (!aconnector || !aconnector->dc_link)
2788 		return;
2789 
2790 	link = aconnector->dc_link;
2791 	if (link->connector_signal != SIGNAL_TYPE_EDP)
2792 		return;
2793 
2794 	conn_base = &aconnector->base;
2795 	adev = drm_to_adev(conn_base->dev);
2796 	dm = &adev->dm;
2797 	for (i = 0; i < dm->num_of_edps; i++) {
2798 		if (link == dm->backlight_link[i])
2799 			break;
2800 	}
2801 	if (i >= dm->num_of_edps)
2802 		return;
2803 	caps = &dm->backlight_caps[i];
2804 	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
2805 	caps->aux_support = false;
2806 	max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall;
2807 	min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
2808 
2809 	if (caps->ext_caps->bits.oled == 1 /*||
2810 	    caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
2811 	    caps->ext_caps->bits.hdr_aux_backlight_control == 1*/)
2812 		caps->aux_support = true;
2813 
2814 	if (amdgpu_backlight == 0)
2815 		caps->aux_support = false;
2816 	else if (amdgpu_backlight == 1)
2817 		caps->aux_support = true;
2818 
2819 	/* From the specification (CTA-861-G), for calculating the maximum
2820 	 * luminance we need to use:
2821 	 *	Luminance = 50*2**(CV/32)
2822 	 * Where CV is a one-byte value.
2823 	 * For calculating this expression we may need float point precision;
2824 	 * to avoid this complexity level, we take advantage that CV is divided
2825 	 * by a constant. From the Euclids division algorithm, we know that CV
2826 	 * can be written as: CV = 32*q + r. Next, we replace CV in the
2827 	 * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just
2828 	 * need to pre-compute the value of r/32. For pre-computing the values
2829 	 * We just used the following Ruby line:
2830 	 *	(0...32).each {|cv| puts (50*2**(cv/32.0)).round}
2831 	 * The results of the above expressions can be verified at
2832 	 * pre_computed_values.
2833 	 */
2834 	q = max_avg >> 5;
2835 	r = max_avg % 32;
2836 	max = (1 << q) * pre_computed_values[r];
2837 
2838 	// min luminance: maxLum * (CV/255)^2 / 100
2839 	q = DIV_ROUND_CLOSEST(min_cll, 255);
2840 	min = max * DIV_ROUND_CLOSEST((q * q), 100);
2841 
2842 	caps->aux_max_input_signal = max;
2843 	caps->aux_min_input_signal = min;
2844 }
2845 
2846 void amdgpu_dm_update_connector_after_detect(
2847 		struct amdgpu_dm_connector *aconnector)
2848 {
2849 	struct drm_connector *connector = &aconnector->base;
2850 	struct drm_device *dev = connector->dev;
2851 	struct dc_sink *sink;
2852 
2853 	/* MST handled by drm_mst framework */
2854 	if (aconnector->mst_mgr.mst_state == true)
2855 		return;
2856 
2857 	sink = aconnector->dc_link->local_sink;
2858 	if (sink)
2859 		dc_sink_retain(sink);
2860 
2861 	/*
2862 	 * Edid mgmt connector gets first update only in mode_valid hook and then
2863 	 * the connector sink is set to either fake or physical sink depends on link status.
2864 	 * Skip if already done during boot.
2865 	 */
2866 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
2867 			&& aconnector->dc_em_sink) {
2868 
2869 		/*
2870 		 * For S3 resume with headless use eml_sink to fake stream
2871 		 * because on resume connector->sink is set to NULL
2872 		 */
2873 		mutex_lock(&dev->mode_config.mutex);
2874 
2875 		if (sink) {
2876 			if (aconnector->dc_sink) {
2877 				amdgpu_dm_update_freesync_caps(connector, NULL);
2878 				/*
2879 				 * retain and release below are used to
2880 				 * bump up refcount for sink because the link doesn't point
2881 				 * to it anymore after disconnect, so on next crtc to connector
2882 				 * reshuffle by UMD we will get into unwanted dc_sink release
2883 				 */
2884 				dc_sink_release(aconnector->dc_sink);
2885 			}
2886 			aconnector->dc_sink = sink;
2887 			dc_sink_retain(aconnector->dc_sink);
2888 			amdgpu_dm_update_freesync_caps(connector,
2889 					aconnector->edid);
2890 		} else {
2891 			amdgpu_dm_update_freesync_caps(connector, NULL);
2892 			if (!aconnector->dc_sink) {
2893 				aconnector->dc_sink = aconnector->dc_em_sink;
2894 				dc_sink_retain(aconnector->dc_sink);
2895 			}
2896 		}
2897 
2898 		mutex_unlock(&dev->mode_config.mutex);
2899 
2900 		if (sink)
2901 			dc_sink_release(sink);
2902 		return;
2903 	}
2904 
2905 	/*
2906 	 * TODO: temporary guard to look for proper fix
2907 	 * if this sink is MST sink, we should not do anything
2908 	 */
2909 	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2910 		dc_sink_release(sink);
2911 		return;
2912 	}
2913 
2914 	if (aconnector->dc_sink == sink) {
2915 		/*
2916 		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
2917 		 * Do nothing!!
2918 		 */
2919 		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2920 				aconnector->connector_id);
2921 		if (sink)
2922 			dc_sink_release(sink);
2923 		return;
2924 	}
2925 
2926 	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
2927 		aconnector->connector_id, aconnector->dc_sink, sink);
2928 
2929 	mutex_lock(&dev->mode_config.mutex);
2930 
2931 	/*
2932 	 * 1. Update status of the drm connector
2933 	 * 2. Send an event and let userspace tell us what to do
2934 	 */
2935 	if (sink) {
2936 		/*
2937 		 * TODO: check if we still need the S3 mode update workaround.
2938 		 * If yes, put it here.
2939 		 */
2940 		if (aconnector->dc_sink) {
2941 			amdgpu_dm_update_freesync_caps(connector, NULL);
2942 			dc_sink_release(aconnector->dc_sink);
2943 		}
2944 
2945 		aconnector->dc_sink = sink;
2946 		dc_sink_retain(aconnector->dc_sink);
2947 		if (sink->dc_edid.length == 0) {
2948 			aconnector->edid = NULL;
2949 			if (aconnector->dc_link->aux_mode) {
2950 				drm_dp_cec_unset_edid(
2951 					&aconnector->dm_dp_aux.aux);
2952 			}
2953 		} else {
2954 			aconnector->edid =
2955 				(struct edid *)sink->dc_edid.raw_edid;
2956 
2957 			drm_connector_update_edid_property(connector,
2958 							   aconnector->edid);
2959 			if (aconnector->dc_link->aux_mode)
2960 				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
2961 						    aconnector->edid);
2962 		}
2963 
2964 		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
2965 		update_connector_ext_caps(aconnector);
2966 	} else {
2967 		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2968 		amdgpu_dm_update_freesync_caps(connector, NULL);
2969 		drm_connector_update_edid_property(connector, NULL);
2970 		aconnector->num_modes = 0;
2971 		dc_sink_release(aconnector->dc_sink);
2972 		aconnector->dc_sink = NULL;
2973 		aconnector->edid = NULL;
2974 #ifdef CONFIG_DRM_AMD_DC_HDCP
2975 		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
2976 		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
2977 			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
2978 #endif
2979 	}
2980 
2981 	mutex_unlock(&dev->mode_config.mutex);
2982 
2983 	update_subconnector_property(aconnector);
2984 
2985 	if (sink)
2986 		dc_sink_release(sink);
2987 }
2988 
2989 static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
2990 {
2991 	struct drm_connector *connector = &aconnector->base;
2992 	struct drm_device *dev = connector->dev;
2993 	enum dc_connection_type new_connection_type = dc_connection_none;
2994 	struct amdgpu_device *adev = drm_to_adev(dev);
2995 #ifdef CONFIG_DRM_AMD_DC_HDCP
2996 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
2997 #endif
2998 
2999 	if (adev->dm.disable_hpd_irq)
3000 		return;
3001 
3002 	/*
3003 	 * In case of failure or MST no need to update connector status or notify the OS
3004 	 * since (for MST case) MST does this in its own context.
3005 	 */
3006 	mutex_lock(&aconnector->hpd_lock);
3007 
3008 #ifdef CONFIG_DRM_AMD_DC_HDCP
3009 	if (adev->dm.hdcp_workqueue) {
3010 		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
3011 		dm_con_state->update_hdcp = true;
3012 	}
3013 #endif
3014 	if (aconnector->fake_enable)
3015 		aconnector->fake_enable = false;
3016 
3017 	if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
3018 		DRM_ERROR("KMS: Failed to detect connector\n");
3019 
3020 	if (aconnector->base.force && new_connection_type == dc_connection_none) {
3021 		emulated_link_detect(aconnector->dc_link);
3022 
3023 
3024 		drm_modeset_lock_all(dev);
3025 		dm_restore_drm_connector_state(dev, connector);
3026 		drm_modeset_unlock_all(dev);
3027 
3028 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3029 			drm_kms_helper_hotplug_event(dev);
3030 
3031 	} else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
3032 		if (new_connection_type == dc_connection_none &&
3033 		    aconnector->dc_link->type == dc_connection_none)
3034 			dm_set_dpms_off(aconnector->dc_link);
3035 
3036 		amdgpu_dm_update_connector_after_detect(aconnector);
3037 
3038 		drm_modeset_lock_all(dev);
3039 		dm_restore_drm_connector_state(dev, connector);
3040 		drm_modeset_unlock_all(dev);
3041 
3042 		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
3043 			drm_kms_helper_hotplug_event(dev);
3044 	}
3045 	mutex_unlock(&aconnector->hpd_lock);
3046 
3047 }
3048 
3049 static void handle_hpd_irq(void *param)
3050 {
3051 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3052 
3053 	handle_hpd_irq_helper(aconnector);
3054 
3055 }
3056 
3057 static void dm_handle_mst_sideband_msg(struct amdgpu_dm_connector *aconnector)
3058 {
3059 	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
3060 	uint8_t dret;
3061 	bool new_irq_handled = false;
3062 	int dpcd_addr;
3063 	int dpcd_bytes_to_read;
3064 
3065 	const int max_process_count = 30;
3066 	int process_count = 0;
3067 
3068 	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
3069 
3070 	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
3071 		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
3072 		/* DPCD 0x200 - 0x201 for downstream IRQ */
3073 		dpcd_addr = DP_SINK_COUNT;
3074 	} else {
3075 		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
3076 		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
3077 		dpcd_addr = DP_SINK_COUNT_ESI;
3078 	}
3079 
3080 	dret = drm_dp_dpcd_read(
3081 		&aconnector->dm_dp_aux.aux,
3082 		dpcd_addr,
3083 		esi,
3084 		dpcd_bytes_to_read);
3085 
3086 	while (dret == dpcd_bytes_to_read &&
3087 		process_count < max_process_count) {
3088 		uint8_t retry;
3089 		dret = 0;
3090 
3091 		process_count++;
3092 
3093 		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
3094 		/* handle HPD short pulse irq */
3095 		if (aconnector->mst_mgr.mst_state)
3096 			drm_dp_mst_hpd_irq(
3097 				&aconnector->mst_mgr,
3098 				esi,
3099 				&new_irq_handled);
3100 
3101 		if (new_irq_handled) {
3102 			/* ACK at DPCD to notify down stream */
3103 			const int ack_dpcd_bytes_to_write =
3104 				dpcd_bytes_to_read - 1;
3105 
3106 			for (retry = 0; retry < 3; retry++) {
3107 				uint8_t wret;
3108 
3109 				wret = drm_dp_dpcd_write(
3110 					&aconnector->dm_dp_aux.aux,
3111 					dpcd_addr + 1,
3112 					&esi[1],
3113 					ack_dpcd_bytes_to_write);
3114 				if (wret == ack_dpcd_bytes_to_write)
3115 					break;
3116 			}
3117 
3118 			/* check if there is new irq to be handled */
3119 			dret = drm_dp_dpcd_read(
3120 				&aconnector->dm_dp_aux.aux,
3121 				dpcd_addr,
3122 				esi,
3123 				dpcd_bytes_to_read);
3124 
3125 			new_irq_handled = false;
3126 		} else {
3127 			break;
3128 		}
3129 	}
3130 
3131 	if (process_count == max_process_count)
3132 		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
3133 }
3134 
3135 static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
3136 							union hpd_irq_data hpd_irq_data)
3137 {
3138 	struct hpd_rx_irq_offload_work *offload_work =
3139 				kzalloc(sizeof(*offload_work), GFP_KERNEL);
3140 
3141 	if (!offload_work) {
3142 		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
3143 		return;
3144 	}
3145 
3146 	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
3147 	offload_work->data = hpd_irq_data;
3148 	offload_work->offload_wq = offload_wq;
3149 
3150 	queue_work(offload_wq->wq, &offload_work->work);
3151 	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
3152 }
3153 
3154 static void handle_hpd_rx_irq(void *param)
3155 {
3156 	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
3157 	struct drm_connector *connector = &aconnector->base;
3158 	struct drm_device *dev = connector->dev;
3159 	struct dc_link *dc_link = aconnector->dc_link;
3160 	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
3161 	bool result = false;
3162 	enum dc_connection_type new_connection_type = dc_connection_none;
3163 	struct amdgpu_device *adev = drm_to_adev(dev);
3164 	union hpd_irq_data hpd_irq_data;
3165 	bool link_loss = false;
3166 	bool has_left_work = false;
3167 	int idx = aconnector->base.index;
3168 	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
3169 
3170 	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
3171 
3172 	if (adev->dm.disable_hpd_irq)
3173 		return;
3174 
3175 	/*
3176 	 * TODO:Temporary add mutex to protect hpd interrupt not have a gpio
3177 	 * conflict, after implement i2c helper, this mutex should be
3178 	 * retired.
3179 	 */
3180 	mutex_lock(&aconnector->hpd_lock);
3181 
3182 	result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data,
3183 						&link_loss, true, &has_left_work);
3184 
3185 	if (!has_left_work)
3186 		goto out;
3187 
3188 	if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) {
3189 		schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3190 		goto out;
3191 	}
3192 
3193 	if (dc_link_dp_allow_hpd_rx_irq(dc_link)) {
3194 		if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY ||
3195 			hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) {
3196 			dm_handle_mst_sideband_msg(aconnector);
3197 			goto out;
3198 		}
3199 
3200 		if (link_loss) {
3201 			bool skip = false;
3202 
3203 			spin_lock(&offload_wq->offload_lock);
3204 			skip = offload_wq->is_handling_link_loss;
3205 
3206 			if (!skip)
3207 				offload_wq->is_handling_link_loss = true;
3208 
3209 			spin_unlock(&offload_wq->offload_lock);
3210 
3211 			if (!skip)
3212 				schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data);
3213 
3214 			goto out;
3215 		}
3216 	}
3217 
3218 out:
3219 	if (result && !is_mst_root_connector) {
3220 		/* Downstream Port status changed. */
3221 		if (!dc_link_detect_sink(dc_link, &new_connection_type))
3222 			DRM_ERROR("KMS: Failed to detect connector\n");
3223 
3224 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
3225 			emulated_link_detect(dc_link);
3226 
3227 			if (aconnector->fake_enable)
3228 				aconnector->fake_enable = false;
3229 
3230 			amdgpu_dm_update_connector_after_detect(aconnector);
3231 
3232 
3233 			drm_modeset_lock_all(dev);
3234 			dm_restore_drm_connector_state(dev, connector);
3235 			drm_modeset_unlock_all(dev);
3236 
3237 			drm_kms_helper_hotplug_event(dev);
3238 		} else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
3239 
3240 			if (aconnector->fake_enable)
3241 				aconnector->fake_enable = false;
3242 
3243 			amdgpu_dm_update_connector_after_detect(aconnector);
3244 
3245 
3246 			drm_modeset_lock_all(dev);
3247 			dm_restore_drm_connector_state(dev, connector);
3248 			drm_modeset_unlock_all(dev);
3249 
3250 			drm_kms_helper_hotplug_event(dev);
3251 		}
3252 	}
3253 #ifdef CONFIG_DRM_AMD_DC_HDCP
3254 	if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
3255 		if (adev->dm.hdcp_workqueue)
3256 			hdcp_handle_cpirq(adev->dm.hdcp_workqueue,  aconnector->base.index);
3257 	}
3258 #endif
3259 
3260 	if (dc_link->type != dc_connection_mst_branch)
3261 		drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
3262 
3263 	mutex_unlock(&aconnector->hpd_lock);
3264 }
3265 
3266 static void register_hpd_handlers(struct amdgpu_device *adev)
3267 {
3268 	struct drm_device *dev = adev_to_drm(adev);
3269 	struct drm_connector *connector;
3270 	struct amdgpu_dm_connector *aconnector;
3271 	const struct dc_link *dc_link;
3272 	struct dc_interrupt_params int_params = {0};
3273 
3274 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3275 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3276 
3277 	list_for_each_entry(connector,
3278 			&dev->mode_config.connector_list, head)	{
3279 
3280 		aconnector = to_amdgpu_dm_connector(connector);
3281 		dc_link = aconnector->dc_link;
3282 
3283 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd) {
3284 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3285 			int_params.irq_source = dc_link->irq_source_hpd;
3286 
3287 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3288 					handle_hpd_irq,
3289 					(void *) aconnector);
3290 		}
3291 
3292 		if (DC_IRQ_SOURCE_INVALID != dc_link->irq_source_hpd_rx) {
3293 
3294 			/* Also register for DP short pulse (hpd_rx). */
3295 			int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3296 			int_params.irq_source =	dc_link->irq_source_hpd_rx;
3297 
3298 			amdgpu_dm_irq_register_interrupt(adev, &int_params,
3299 					handle_hpd_rx_irq,
3300 					(void *) aconnector);
3301 
3302 			if (adev->dm.hpd_rx_offload_wq)
3303 				adev->dm.hpd_rx_offload_wq[connector->index].aconnector =
3304 					aconnector;
3305 		}
3306 	}
3307 }
3308 
3309 #if defined(CONFIG_DRM_AMD_DC_SI)
3310 /* Register IRQ sources and initialize IRQ callbacks */
3311 static int dce60_register_irq_handlers(struct amdgpu_device *adev)
3312 {
3313 	struct dc *dc = adev->dm.dc;
3314 	struct common_irq_params *c_irq_params;
3315 	struct dc_interrupt_params int_params = {0};
3316 	int r;
3317 	int i;
3318 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3319 
3320 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3321 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3322 
3323 	/*
3324 	 * Actions of amdgpu_irq_add_id():
3325 	 * 1. Register a set() function with base driver.
3326 	 *    Base driver will call set() function to enable/disable an
3327 	 *    interrupt in DC hardware.
3328 	 * 2. Register amdgpu_dm_irq_handler().
3329 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3330 	 *    coming from DC hardware.
3331 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3332 	 *    for acknowledging and handling. */
3333 
3334 	/* Use VBLANK interrupt */
3335 	for (i = 0; i < adev->mode_info.num_crtc; i++) {
3336 		r = amdgpu_irq_add_id(adev, client_id, i+1 , &adev->crtc_irq);
3337 		if (r) {
3338 			DRM_ERROR("Failed to add crtc irq id!\n");
3339 			return r;
3340 		}
3341 
3342 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3343 		int_params.irq_source =
3344 			dc_interrupt_to_irq_source(dc, i+1 , 0);
3345 
3346 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3347 
3348 		c_irq_params->adev = adev;
3349 		c_irq_params->irq_src = int_params.irq_source;
3350 
3351 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3352 				dm_crtc_high_irq, c_irq_params);
3353 	}
3354 
3355 	/* Use GRPH_PFLIP interrupt */
3356 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3357 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3358 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3359 		if (r) {
3360 			DRM_ERROR("Failed to add page flip irq id!\n");
3361 			return r;
3362 		}
3363 
3364 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3365 		int_params.irq_source =
3366 			dc_interrupt_to_irq_source(dc, i, 0);
3367 
3368 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3369 
3370 		c_irq_params->adev = adev;
3371 		c_irq_params->irq_src = int_params.irq_source;
3372 
3373 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3374 				dm_pflip_high_irq, c_irq_params);
3375 
3376 	}
3377 
3378 	/* HPD */
3379 	r = amdgpu_irq_add_id(adev, client_id,
3380 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3381 	if (r) {
3382 		DRM_ERROR("Failed to add hpd irq id!\n");
3383 		return r;
3384 	}
3385 
3386 	register_hpd_handlers(adev);
3387 
3388 	return 0;
3389 }
3390 #endif
3391 
3392 /* Register IRQ sources and initialize IRQ callbacks */
3393 static int dce110_register_irq_handlers(struct amdgpu_device *adev)
3394 {
3395 	struct dc *dc = adev->dm.dc;
3396 	struct common_irq_params *c_irq_params;
3397 	struct dc_interrupt_params int_params = {0};
3398 	int r;
3399 	int i;
3400 	unsigned client_id = AMDGPU_IRQ_CLIENTID_LEGACY;
3401 
3402 	if (adev->asic_type >= CHIP_VEGA10)
3403 		client_id = SOC15_IH_CLIENTID_DCE;
3404 
3405 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3406 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3407 
3408 	/*
3409 	 * Actions of amdgpu_irq_add_id():
3410 	 * 1. Register a set() function with base driver.
3411 	 *    Base driver will call set() function to enable/disable an
3412 	 *    interrupt in DC hardware.
3413 	 * 2. Register amdgpu_dm_irq_handler().
3414 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3415 	 *    coming from DC hardware.
3416 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3417 	 *    for acknowledging and handling. */
3418 
3419 	/* Use VBLANK interrupt */
3420 	for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) {
3421 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq);
3422 		if (r) {
3423 			DRM_ERROR("Failed to add crtc irq id!\n");
3424 			return r;
3425 		}
3426 
3427 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3428 		int_params.irq_source =
3429 			dc_interrupt_to_irq_source(dc, i, 0);
3430 
3431 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3432 
3433 		c_irq_params->adev = adev;
3434 		c_irq_params->irq_src = int_params.irq_source;
3435 
3436 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3437 				dm_crtc_high_irq, c_irq_params);
3438 	}
3439 
3440 	/* Use VUPDATE interrupt */
3441 	for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) {
3442 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq);
3443 		if (r) {
3444 			DRM_ERROR("Failed to add vupdate irq id!\n");
3445 			return r;
3446 		}
3447 
3448 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3449 		int_params.irq_source =
3450 			dc_interrupt_to_irq_source(dc, i, 0);
3451 
3452 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3453 
3454 		c_irq_params->adev = adev;
3455 		c_irq_params->irq_src = int_params.irq_source;
3456 
3457 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3458 				dm_vupdate_high_irq, c_irq_params);
3459 	}
3460 
3461 	/* Use GRPH_PFLIP interrupt */
3462 	for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP;
3463 			i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) {
3464 		r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq);
3465 		if (r) {
3466 			DRM_ERROR("Failed to add page flip irq id!\n");
3467 			return r;
3468 		}
3469 
3470 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3471 		int_params.irq_source =
3472 			dc_interrupt_to_irq_source(dc, i, 0);
3473 
3474 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3475 
3476 		c_irq_params->adev = adev;
3477 		c_irq_params->irq_src = int_params.irq_source;
3478 
3479 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3480 				dm_pflip_high_irq, c_irq_params);
3481 
3482 	}
3483 
3484 	/* HPD */
3485 	r = amdgpu_irq_add_id(adev, client_id,
3486 			VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq);
3487 	if (r) {
3488 		DRM_ERROR("Failed to add hpd irq id!\n");
3489 		return r;
3490 	}
3491 
3492 	register_hpd_handlers(adev);
3493 
3494 	return 0;
3495 }
3496 
3497 #if defined(CONFIG_DRM_AMD_DC_DCN)
3498 /* Register IRQ sources and initialize IRQ callbacks */
3499 static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
3500 {
3501 	struct dc *dc = adev->dm.dc;
3502 	struct common_irq_params *c_irq_params;
3503 	struct dc_interrupt_params int_params = {0};
3504 	int r;
3505 	int i;
3506 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3507 	static const unsigned int vrtl_int_srcid[] = {
3508 		DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL,
3509 		DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL,
3510 		DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL,
3511 		DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL,
3512 		DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL,
3513 		DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL
3514 	};
3515 #endif
3516 
3517 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3518 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3519 
3520 	/*
3521 	 * Actions of amdgpu_irq_add_id():
3522 	 * 1. Register a set() function with base driver.
3523 	 *    Base driver will call set() function to enable/disable an
3524 	 *    interrupt in DC hardware.
3525 	 * 2. Register amdgpu_dm_irq_handler().
3526 	 *    Base driver will call amdgpu_dm_irq_handler() for ALL interrupts
3527 	 *    coming from DC hardware.
3528 	 *    amdgpu_dm_irq_handler() will re-direct the interrupt to DC
3529 	 *    for acknowledging and handling.
3530 	 */
3531 
3532 	/* Use VSTARTUP interrupt */
3533 	for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP;
3534 			i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1;
3535 			i++) {
3536 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq);
3537 
3538 		if (r) {
3539 			DRM_ERROR("Failed to add crtc irq id!\n");
3540 			return r;
3541 		}
3542 
3543 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3544 		int_params.irq_source =
3545 			dc_interrupt_to_irq_source(dc, i, 0);
3546 
3547 		c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1];
3548 
3549 		c_irq_params->adev = adev;
3550 		c_irq_params->irq_src = int_params.irq_source;
3551 
3552 		amdgpu_dm_irq_register_interrupt(
3553 			adev, &int_params, dm_crtc_high_irq, c_irq_params);
3554 	}
3555 
3556 	/* Use otg vertical line interrupt */
3557 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
3558 	for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) {
3559 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE,
3560 				vrtl_int_srcid[i], &adev->vline0_irq);
3561 
3562 		if (r) {
3563 			DRM_ERROR("Failed to add vline0 irq id!\n");
3564 			return r;
3565 		}
3566 
3567 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3568 		int_params.irq_source =
3569 			dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0);
3570 
3571 		if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) {
3572 			DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]);
3573 			break;
3574 		}
3575 
3576 		c_irq_params = &adev->dm.vline0_params[int_params.irq_source
3577 					- DC_IRQ_SOURCE_DC1_VLINE0];
3578 
3579 		c_irq_params->adev = adev;
3580 		c_irq_params->irq_src = int_params.irq_source;
3581 
3582 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3583 				dm_dcn_vertical_interrupt0_high_irq, c_irq_params);
3584 	}
3585 #endif
3586 
3587 	/* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to
3588 	 * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx
3589 	 * to trigger at end of each vblank, regardless of state of the lock,
3590 	 * matching DCE behaviour.
3591 	 */
3592 	for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT;
3593 	     i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1;
3594 	     i++) {
3595 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq);
3596 
3597 		if (r) {
3598 			DRM_ERROR("Failed to add vupdate irq id!\n");
3599 			return r;
3600 		}
3601 
3602 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3603 		int_params.irq_source =
3604 			dc_interrupt_to_irq_source(dc, i, 0);
3605 
3606 		c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1];
3607 
3608 		c_irq_params->adev = adev;
3609 		c_irq_params->irq_src = int_params.irq_source;
3610 
3611 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3612 				dm_vupdate_high_irq, c_irq_params);
3613 	}
3614 
3615 	/* Use GRPH_PFLIP interrupt */
3616 	for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT;
3617 			i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1;
3618 			i++) {
3619 		r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq);
3620 		if (r) {
3621 			DRM_ERROR("Failed to add page flip irq id!\n");
3622 			return r;
3623 		}
3624 
3625 		int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT;
3626 		int_params.irq_source =
3627 			dc_interrupt_to_irq_source(dc, i, 0);
3628 
3629 		c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST];
3630 
3631 		c_irq_params->adev = adev;
3632 		c_irq_params->irq_src = int_params.irq_source;
3633 
3634 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3635 				dm_pflip_high_irq, c_irq_params);
3636 
3637 	}
3638 
3639 	/* HPD */
3640 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT,
3641 			&adev->hpd_irq);
3642 	if (r) {
3643 		DRM_ERROR("Failed to add hpd irq id!\n");
3644 		return r;
3645 	}
3646 
3647 	register_hpd_handlers(adev);
3648 
3649 	return 0;
3650 }
3651 /* Register Outbox IRQ sources and initialize IRQ callbacks */
3652 static int register_outbox_irq_handlers(struct amdgpu_device *adev)
3653 {
3654 	struct dc *dc = adev->dm.dc;
3655 	struct common_irq_params *c_irq_params;
3656 	struct dc_interrupt_params int_params = {0};
3657 	int r, i;
3658 
3659 	int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT;
3660 	int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT;
3661 
3662 	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT,
3663 			&adev->dmub_outbox_irq);
3664 	if (r) {
3665 		DRM_ERROR("Failed to add outbox irq id!\n");
3666 		return r;
3667 	}
3668 
3669 	if (dc->ctx->dmub_srv) {
3670 		i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT;
3671 		int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
3672 		int_params.irq_source =
3673 		dc_interrupt_to_irq_source(dc, i, 0);
3674 
3675 		c_irq_params = &adev->dm.dmub_outbox_params[0];
3676 
3677 		c_irq_params->adev = adev;
3678 		c_irq_params->irq_src = int_params.irq_source;
3679 
3680 		amdgpu_dm_irq_register_interrupt(adev, &int_params,
3681 				dm_dmub_outbox1_low_irq, c_irq_params);
3682 	}
3683 
3684 	return 0;
3685 }
3686 #endif
3687 
3688 /*
3689  * Acquires the lock for the atomic state object and returns
3690  * the new atomic state.
3691  *
3692  * This should only be called during atomic check.
3693  */
3694 static int dm_atomic_get_state(struct drm_atomic_state *state,
3695 			       struct dm_atomic_state **dm_state)
3696 {
3697 	struct drm_device *dev = state->dev;
3698 	struct amdgpu_device *adev = drm_to_adev(dev);
3699 	struct amdgpu_display_manager *dm = &adev->dm;
3700 	struct drm_private_state *priv_state;
3701 
3702 	if (*dm_state)
3703 		return 0;
3704 
3705 	priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj);
3706 	if (IS_ERR(priv_state))
3707 		return PTR_ERR(priv_state);
3708 
3709 	*dm_state = to_dm_atomic_state(priv_state);
3710 
3711 	return 0;
3712 }
3713 
3714 static struct dm_atomic_state *
3715 dm_atomic_get_new_state(struct drm_atomic_state *state)
3716 {
3717 	struct drm_device *dev = state->dev;
3718 	struct amdgpu_device *adev = drm_to_adev(dev);
3719 	struct amdgpu_display_manager *dm = &adev->dm;
3720 	struct drm_private_obj *obj;
3721 	struct drm_private_state *new_obj_state;
3722 	int i;
3723 
3724 	for_each_new_private_obj_in_state(state, obj, new_obj_state, i) {
3725 		if (obj->funcs == dm->atomic_obj.funcs)
3726 			return to_dm_atomic_state(new_obj_state);
3727 	}
3728 
3729 	return NULL;
3730 }
3731 
3732 static struct drm_private_state *
3733 dm_atomic_duplicate_state(struct drm_private_obj *obj)
3734 {
3735 	struct dm_atomic_state *old_state, *new_state;
3736 
3737 	new_state = kzalloc(sizeof(*new_state), GFP_KERNEL);
3738 	if (!new_state)
3739 		return NULL;
3740 
3741 	__drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base);
3742 
3743 	old_state = to_dm_atomic_state(obj->state);
3744 
3745 	if (old_state && old_state->context)
3746 		new_state->context = dc_copy_state(old_state->context);
3747 
3748 	if (!new_state->context) {
3749 		kfree(new_state);
3750 		return NULL;
3751 	}
3752 
3753 	return &new_state->base;
3754 }
3755 
3756 static void dm_atomic_destroy_state(struct drm_private_obj *obj,
3757 				    struct drm_private_state *state)
3758 {
3759 	struct dm_atomic_state *dm_state = to_dm_atomic_state(state);
3760 
3761 	if (dm_state && dm_state->context)
3762 		dc_release_state(dm_state->context);
3763 
3764 	kfree(dm_state);
3765 }
3766 
3767 static struct drm_private_state_funcs dm_atomic_state_funcs = {
3768 	.atomic_duplicate_state = dm_atomic_duplicate_state,
3769 	.atomic_destroy_state = dm_atomic_destroy_state,
3770 };
3771 
3772 static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
3773 {
3774 	struct dm_atomic_state *state;
3775 	int r;
3776 
3777 	adev->mode_info.mode_config_initialized = true;
3778 
3779 	adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs;
3780 	adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs;
3781 
3782 	adev_to_drm(adev)->mode_config.max_width = 16384;
3783 	adev_to_drm(adev)->mode_config.max_height = 16384;
3784 
3785 	adev_to_drm(adev)->mode_config.preferred_depth = 24;
3786 	adev_to_drm(adev)->mode_config.prefer_shadow = 1;
3787 	/* indicates support for immediate flip */
3788 	adev_to_drm(adev)->mode_config.async_page_flip = true;
3789 
3790 	adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base;
3791 
3792 	state = kzalloc(sizeof(*state), GFP_KERNEL);
3793 	if (!state)
3794 		return -ENOMEM;
3795 
3796 	state->context = dc_create_state(adev->dm.dc);
3797 	if (!state->context) {
3798 		kfree(state);
3799 		return -ENOMEM;
3800 	}
3801 
3802 	dc_resource_state_copy_construct_current(adev->dm.dc, state->context);
3803 
3804 	drm_atomic_private_obj_init(adev_to_drm(adev),
3805 				    &adev->dm.atomic_obj,
3806 				    &state->base,
3807 				    &dm_atomic_state_funcs);
3808 
3809 	r = amdgpu_display_modeset_create_props(adev);
3810 	if (r) {
3811 		dc_release_state(state->context);
3812 		kfree(state);
3813 		return r;
3814 	}
3815 
3816 	r = amdgpu_dm_audio_init(adev);
3817 	if (r) {
3818 		dc_release_state(state->context);
3819 		kfree(state);
3820 		return r;
3821 	}
3822 
3823 	return 0;
3824 }
3825 
3826 #define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
3827 #define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
3828 #define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
3829 
3830 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
3831 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
3832 
3833 static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm,
3834 					    int bl_idx)
3835 {
3836 #if defined(CONFIG_ACPI)
3837 	struct amdgpu_dm_backlight_caps caps;
3838 
3839 	memset(&caps, 0, sizeof(caps));
3840 
3841 	if (dm->backlight_caps[bl_idx].caps_valid)
3842 		return;
3843 
3844 	amdgpu_acpi_get_backlight_caps(&caps);
3845 	if (caps.caps_valid) {
3846 		dm->backlight_caps[bl_idx].caps_valid = true;
3847 		if (caps.aux_support)
3848 			return;
3849 		dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal;
3850 		dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal;
3851 	} else {
3852 		dm->backlight_caps[bl_idx].min_input_signal =
3853 				AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3854 		dm->backlight_caps[bl_idx].max_input_signal =
3855 				AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3856 	}
3857 #else
3858 	if (dm->backlight_caps[bl_idx].aux_support)
3859 		return;
3860 
3861 	dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
3862 	dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
3863 #endif
3864 }
3865 
3866 static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps,
3867 				unsigned *min, unsigned *max)
3868 {
3869 	if (!caps)
3870 		return 0;
3871 
3872 	if (caps->aux_support) {
3873 		// Firmware limits are in nits, DC API wants millinits.
3874 		*max = 1000 * caps->aux_max_input_signal;
3875 		*min = 1000 * caps->aux_min_input_signal;
3876 	} else {
3877 		// Firmware limits are 8-bit, PWM control is 16-bit.
3878 		*max = 0x101 * caps->max_input_signal;
3879 		*min = 0x101 * caps->min_input_signal;
3880 	}
3881 	return 1;
3882 }
3883 
3884 static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps,
3885 					uint32_t brightness)
3886 {
3887 	unsigned min, max;
3888 
3889 	if (!get_brightness_range(caps, &min, &max))
3890 		return brightness;
3891 
3892 	// Rescale 0..255 to min..max
3893 	return min + DIV_ROUND_CLOSEST((max - min) * brightness,
3894 				       AMDGPU_MAX_BL_LEVEL);
3895 }
3896 
3897 static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps,
3898 				      uint32_t brightness)
3899 {
3900 	unsigned min, max;
3901 
3902 	if (!get_brightness_range(caps, &min, &max))
3903 		return brightness;
3904 
3905 	if (brightness < min)
3906 		return 0;
3907 	// Rescale min..max to 0..255
3908 	return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min),
3909 				 max - min);
3910 }
3911 
3912 static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
3913 					 int bl_idx,
3914 					 u32 user_brightness)
3915 {
3916 	struct amdgpu_dm_backlight_caps caps;
3917 	struct dc_link *link;
3918 	u32 brightness;
3919 	bool rc;
3920 
3921 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3922 	caps = dm->backlight_caps[bl_idx];
3923 
3924 	dm->brightness[bl_idx] = user_brightness;
3925 	brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]);
3926 	link = (struct dc_link *)dm->backlight_link[bl_idx];
3927 
3928 	/* Change brightness based on AUX property */
3929 	if (caps.aux_support) {
3930 		rc = dc_link_set_backlight_level_nits(link, true, brightness,
3931 						      AUX_BL_DEFAULT_TRANSITION_TIME_MS);
3932 		if (!rc)
3933 			DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx);
3934 	} else {
3935 		rc = dc_link_set_backlight_level(link, brightness, 0);
3936 		if (!rc)
3937 			DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
3938 	}
3939 
3940 	if (rc)
3941 		dm->actual_brightness[bl_idx] = user_brightness;
3942 }
3943 
3944 static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
3945 {
3946 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3947 	int i;
3948 
3949 	for (i = 0; i < dm->num_of_edps; i++) {
3950 		if (bd == dm->backlight_dev[i])
3951 			break;
3952 	}
3953 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3954 		i = 0;
3955 	amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness);
3956 
3957 	return 0;
3958 }
3959 
3960 static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm,
3961 					 int bl_idx)
3962 {
3963 	struct amdgpu_dm_backlight_caps caps;
3964 	struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx];
3965 
3966 	amdgpu_dm_update_backlight_caps(dm, bl_idx);
3967 	caps = dm->backlight_caps[bl_idx];
3968 
3969 	if (caps.aux_support) {
3970 		u32 avg, peak;
3971 		bool rc;
3972 
3973 		rc = dc_link_get_backlight_level_nits(link, &avg, &peak);
3974 		if (!rc)
3975 			return dm->brightness[bl_idx];
3976 		return convert_brightness_to_user(&caps, avg);
3977 	} else {
3978 		int ret = dc_link_get_backlight_level(link);
3979 
3980 		if (ret == DC_ERROR_UNEXPECTED)
3981 			return dm->brightness[bl_idx];
3982 		return convert_brightness_to_user(&caps, ret);
3983 	}
3984 }
3985 
3986 static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
3987 {
3988 	struct amdgpu_display_manager *dm = bl_get_data(bd);
3989 	int i;
3990 
3991 	for (i = 0; i < dm->num_of_edps; i++) {
3992 		if (bd == dm->backlight_dev[i])
3993 			break;
3994 	}
3995 	if (i >= AMDGPU_DM_MAX_NUM_EDP)
3996 		i = 0;
3997 	return amdgpu_dm_backlight_get_level(dm, i);
3998 }
3999 
4000 static const struct backlight_ops amdgpu_dm_backlight_ops = {
4001 	.options = BL_CORE_SUSPENDRESUME,
4002 	.get_brightness = amdgpu_dm_backlight_get_brightness,
4003 	.update_status	= amdgpu_dm_backlight_update_status,
4004 };
4005 
4006 static void
4007 amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
4008 {
4009 	char bl_name[16];
4010 	struct backlight_properties props = { 0 };
4011 
4012 	amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps);
4013 	dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL;
4014 
4015 	props.max_brightness = AMDGPU_MAX_BL_LEVEL;
4016 	props.brightness = AMDGPU_MAX_BL_LEVEL;
4017 	props.type = BACKLIGHT_RAW;
4018 
4019 	snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
4020 		 adev_to_drm(dm->adev)->primary->index + dm->num_of_edps);
4021 
4022 	dm->backlight_dev[dm->num_of_edps] = backlight_device_register(bl_name,
4023 								       adev_to_drm(dm->adev)->dev,
4024 								       dm,
4025 								       &amdgpu_dm_backlight_ops,
4026 								       &props);
4027 
4028 	if (IS_ERR(dm->backlight_dev[dm->num_of_edps]))
4029 		DRM_ERROR("DM: Backlight registration failed!\n");
4030 	else
4031 		DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name);
4032 }
4033 #endif
4034 
4035 static int initialize_plane(struct amdgpu_display_manager *dm,
4036 			    struct amdgpu_mode_info *mode_info, int plane_id,
4037 			    enum drm_plane_type plane_type,
4038 			    const struct dc_plane_cap *plane_cap)
4039 {
4040 	struct drm_plane *plane;
4041 	unsigned long possible_crtcs;
4042 	int ret = 0;
4043 
4044 	plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL);
4045 	if (!plane) {
4046 		DRM_ERROR("KMS: Failed to allocate plane\n");
4047 		return -ENOMEM;
4048 	}
4049 	plane->type = plane_type;
4050 
4051 	/*
4052 	 * HACK: IGT tests expect that the primary plane for a CRTC
4053 	 * can only have one possible CRTC. Only expose support for
4054 	 * any CRTC if they're not going to be used as a primary plane
4055 	 * for a CRTC - like overlay or underlay planes.
4056 	 */
4057 	possible_crtcs = 1 << plane_id;
4058 	if (plane_id >= dm->dc->caps.max_streams)
4059 		possible_crtcs = 0xff;
4060 
4061 	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
4062 
4063 	if (ret) {
4064 		DRM_ERROR("KMS: Failed to initialize plane\n");
4065 		kfree(plane);
4066 		return ret;
4067 	}
4068 
4069 	if (mode_info)
4070 		mode_info->planes[plane_id] = plane;
4071 
4072 	return ret;
4073 }
4074 
4075 
4076 static void register_backlight_device(struct amdgpu_display_manager *dm,
4077 				      struct dc_link *link)
4078 {
4079 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
4080 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
4081 
4082 	if ((link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) &&
4083 	    link->type != dc_connection_none) {
4084 		/*
4085 		 * Event if registration failed, we should continue with
4086 		 * DM initialization because not having a backlight control
4087 		 * is better then a black screen.
4088 		 */
4089 		if (!dm->backlight_dev[dm->num_of_edps])
4090 			amdgpu_dm_register_backlight_device(dm);
4091 
4092 		if (dm->backlight_dev[dm->num_of_edps]) {
4093 			dm->backlight_link[dm->num_of_edps] = link;
4094 			dm->num_of_edps++;
4095 		}
4096 	}
4097 #endif
4098 }
4099 
4100 
4101 /*
4102  * In this architecture, the association
4103  * connector -> encoder -> crtc
4104  * id not really requried. The crtc and connector will hold the
4105  * display_index as an abstraction to use with DAL component
4106  *
4107  * Returns 0 on success
4108  */
4109 static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
4110 {
4111 	struct amdgpu_display_manager *dm = &adev->dm;
4112 	int32_t i;
4113 	struct amdgpu_dm_connector *aconnector = NULL;
4114 	struct amdgpu_encoder *aencoder = NULL;
4115 	struct amdgpu_mode_info *mode_info = &adev->mode_info;
4116 	uint32_t link_cnt;
4117 	int32_t primary_planes;
4118 	enum dc_connection_type new_connection_type = dc_connection_none;
4119 	const struct dc_plane_cap *plane;
4120 
4121 	dm->display_indexes_num = dm->dc->caps.max_streams;
4122 	/* Update the actual used number of crtc */
4123 	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
4124 
4125 	link_cnt = dm->dc->caps.max_links;
4126 	if (amdgpu_dm_mode_config_init(dm->adev)) {
4127 		DRM_ERROR("DM: Failed to initialize mode config\n");
4128 		return -EINVAL;
4129 	}
4130 
4131 	/* There is one primary plane per CRTC */
4132 	primary_planes = dm->dc->caps.max_streams;
4133 	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
4134 
4135 	/*
4136 	 * Initialize primary planes, implicit planes for legacy IOCTLS.
4137 	 * Order is reversed to match iteration order in atomic check.
4138 	 */
4139 	for (i = (primary_planes - 1); i >= 0; i--) {
4140 		plane = &dm->dc->caps.planes[i];
4141 
4142 		if (initialize_plane(dm, mode_info, i,
4143 				     DRM_PLANE_TYPE_PRIMARY, plane)) {
4144 			DRM_ERROR("KMS: Failed to initialize primary plane\n");
4145 			goto fail;
4146 		}
4147 	}
4148 
4149 	/*
4150 	 * Initialize overlay planes, index starting after primary planes.
4151 	 * These planes have a higher DRM index than the primary planes since
4152 	 * they should be considered as having a higher z-order.
4153 	 * Order is reversed to match iteration order in atomic check.
4154 	 *
4155 	 * Only support DCN for now, and only expose one so we don't encourage
4156 	 * userspace to use up all the pipes.
4157 	 */
4158 	for (i = 0; i < dm->dc->caps.max_planes; ++i) {
4159 		struct dc_plane_cap *plane = &dm->dc->caps.planes[i];
4160 
4161 		if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL)
4162 			continue;
4163 
4164 		if (!plane->blends_with_above || !plane->blends_with_below)
4165 			continue;
4166 
4167 		if (!plane->pixel_format_support.argb8888)
4168 			continue;
4169 
4170 		if (initialize_plane(dm, NULL, primary_planes + i,
4171 				     DRM_PLANE_TYPE_OVERLAY, plane)) {
4172 			DRM_ERROR("KMS: Failed to initialize overlay plane\n");
4173 			goto fail;
4174 		}
4175 
4176 		/* Only create one overlay plane. */
4177 		break;
4178 	}
4179 
4180 	for (i = 0; i < dm->dc->caps.max_streams; i++)
4181 		if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) {
4182 			DRM_ERROR("KMS: Failed to initialize crtc\n");
4183 			goto fail;
4184 		}
4185 
4186 #if defined(CONFIG_DRM_AMD_DC_DCN)
4187 	/* Use Outbox interrupt */
4188 	switch (adev->asic_type) {
4189 	case CHIP_SIENNA_CICHLID:
4190 	case CHIP_NAVY_FLOUNDER:
4191 	case CHIP_YELLOW_CARP:
4192 	case CHIP_RENOIR:
4193 		if (register_outbox_irq_handlers(dm->adev)) {
4194 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4195 			goto fail;
4196 		}
4197 		break;
4198 	default:
4199 		DRM_DEBUG_KMS("Unsupported ASIC type for outbox: 0x%X\n", adev->asic_type);
4200 	}
4201 #endif
4202 
4203 	/* loops over all connectors on the board */
4204 	for (i = 0; i < link_cnt; i++) {
4205 		struct dc_link *link = NULL;
4206 
4207 		if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) {
4208 			DRM_ERROR(
4209 				"KMS: Cannot support more than %d display indexes\n",
4210 					AMDGPU_DM_MAX_DISPLAY_INDEX);
4211 			continue;
4212 		}
4213 
4214 		aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
4215 		if (!aconnector)
4216 			goto fail;
4217 
4218 		aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL);
4219 		if (!aencoder)
4220 			goto fail;
4221 
4222 		if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) {
4223 			DRM_ERROR("KMS: Failed to initialize encoder\n");
4224 			goto fail;
4225 		}
4226 
4227 		if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) {
4228 			DRM_ERROR("KMS: Failed to initialize connector\n");
4229 			goto fail;
4230 		}
4231 
4232 		link = dc_get_link_at_index(dm->dc, i);
4233 
4234 		if (!dc_link_detect_sink(link, &new_connection_type))
4235 			DRM_ERROR("KMS: Failed to detect connector\n");
4236 
4237 		if (aconnector->base.force && new_connection_type == dc_connection_none) {
4238 			emulated_link_detect(link);
4239 			amdgpu_dm_update_connector_after_detect(aconnector);
4240 
4241 		} else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
4242 			amdgpu_dm_update_connector_after_detect(aconnector);
4243 			register_backlight_device(dm, link);
4244 
4245 			if (dm->num_of_edps)
4246 				update_connector_ext_caps(aconnector);
4247 			if (amdgpu_dc_feature_mask & DC_PSR_MASK)
4248 				amdgpu_dm_set_psr_caps(link);
4249 
4250 			/* TODO: Fix vblank control helpers to delay PSR entry to allow this when
4251 			 * PSR is also supported.
4252 			 */
4253 			if (link->psr_settings.psr_feature_enabled)
4254 				adev_to_drm(adev)->vblank_disable_immediate = false;
4255 		}
4256 
4257 
4258 	}
4259 
4260 	/* Software is initialized. Now we can register interrupt handlers. */
4261 	switch (adev->asic_type) {
4262 #if defined(CONFIG_DRM_AMD_DC_SI)
4263 	case CHIP_TAHITI:
4264 	case CHIP_PITCAIRN:
4265 	case CHIP_VERDE:
4266 	case CHIP_OLAND:
4267 		if (dce60_register_irq_handlers(dm->adev)) {
4268 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4269 			goto fail;
4270 		}
4271 		break;
4272 #endif
4273 	case CHIP_BONAIRE:
4274 	case CHIP_HAWAII:
4275 	case CHIP_KAVERI:
4276 	case CHIP_KABINI:
4277 	case CHIP_MULLINS:
4278 	case CHIP_TONGA:
4279 	case CHIP_FIJI:
4280 	case CHIP_CARRIZO:
4281 	case CHIP_STONEY:
4282 	case CHIP_POLARIS11:
4283 	case CHIP_POLARIS10:
4284 	case CHIP_POLARIS12:
4285 	case CHIP_VEGAM:
4286 	case CHIP_VEGA10:
4287 	case CHIP_VEGA12:
4288 	case CHIP_VEGA20:
4289 		if (dce110_register_irq_handlers(dm->adev)) {
4290 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4291 			goto fail;
4292 		}
4293 		break;
4294 #if defined(CONFIG_DRM_AMD_DC_DCN)
4295 	case CHIP_RAVEN:
4296 	case CHIP_NAVI12:
4297 	case CHIP_NAVI10:
4298 	case CHIP_NAVI14:
4299 	case CHIP_RENOIR:
4300 	case CHIP_SIENNA_CICHLID:
4301 	case CHIP_NAVY_FLOUNDER:
4302 	case CHIP_DIMGREY_CAVEFISH:
4303 	case CHIP_BEIGE_GOBY:
4304 	case CHIP_VANGOGH:
4305 	case CHIP_YELLOW_CARP:
4306 		if (dcn10_register_irq_handlers(dm->adev)) {
4307 			DRM_ERROR("DM: Failed to initialize IRQ\n");
4308 			goto fail;
4309 		}
4310 		break;
4311 #endif
4312 	default:
4313 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4314 		goto fail;
4315 	}
4316 
4317 	return 0;
4318 fail:
4319 	kfree(aencoder);
4320 	kfree(aconnector);
4321 
4322 	return -EINVAL;
4323 }
4324 
4325 static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm)
4326 {
4327 	drm_atomic_private_obj_fini(&dm->atomic_obj);
4328 	return;
4329 }
4330 
4331 /******************************************************************************
4332  * amdgpu_display_funcs functions
4333  *****************************************************************************/
4334 
4335 /*
4336  * dm_bandwidth_update - program display watermarks
4337  *
4338  * @adev: amdgpu_device pointer
4339  *
4340  * Calculate and program the display watermarks and line buffer allocation.
4341  */
4342 static void dm_bandwidth_update(struct amdgpu_device *adev)
4343 {
4344 	/* TODO: implement later */
4345 }
4346 
4347 static const struct amdgpu_display_funcs dm_display_funcs = {
4348 	.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
4349 	.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
4350 	.backlight_set_level = NULL, /* never called for DC */
4351 	.backlight_get_level = NULL, /* never called for DC */
4352 	.hpd_sense = NULL,/* called unconditionally */
4353 	.hpd_set_polarity = NULL, /* called unconditionally */
4354 	.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
4355 	.page_flip_get_scanoutpos =
4356 		dm_crtc_get_scanoutpos,/* called unconditionally */
4357 	.add_encoder = NULL, /* VBIOS parsing. DAL does it. */
4358 	.add_connector = NULL, /* VBIOS parsing. DAL does it. */
4359 };
4360 
4361 #if defined(CONFIG_DEBUG_KERNEL_DC)
4362 
4363 static ssize_t s3_debug_store(struct device *device,
4364 			      struct device_attribute *attr,
4365 			      const char *buf,
4366 			      size_t count)
4367 {
4368 	int ret;
4369 	int s3_state;
4370 	struct drm_device *drm_dev = dev_get_drvdata(device);
4371 	struct amdgpu_device *adev = drm_to_adev(drm_dev);
4372 
4373 	ret = kstrtoint(buf, 0, &s3_state);
4374 
4375 	if (ret == 0) {
4376 		if (s3_state) {
4377 			dm_resume(adev);
4378 			drm_kms_helper_hotplug_event(adev_to_drm(adev));
4379 		} else
4380 			dm_suspend(adev);
4381 	}
4382 
4383 	return ret == 0 ? count : 0;
4384 }
4385 
4386 DEVICE_ATTR_WO(s3_debug);
4387 
4388 #endif
4389 
4390 static int dm_early_init(void *handle)
4391 {
4392 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
4393 
4394 	switch (adev->asic_type) {
4395 #if defined(CONFIG_DRM_AMD_DC_SI)
4396 	case CHIP_TAHITI:
4397 	case CHIP_PITCAIRN:
4398 	case CHIP_VERDE:
4399 		adev->mode_info.num_crtc = 6;
4400 		adev->mode_info.num_hpd = 6;
4401 		adev->mode_info.num_dig = 6;
4402 		break;
4403 	case CHIP_OLAND:
4404 		adev->mode_info.num_crtc = 2;
4405 		adev->mode_info.num_hpd = 2;
4406 		adev->mode_info.num_dig = 2;
4407 		break;
4408 #endif
4409 	case CHIP_BONAIRE:
4410 	case CHIP_HAWAII:
4411 		adev->mode_info.num_crtc = 6;
4412 		adev->mode_info.num_hpd = 6;
4413 		adev->mode_info.num_dig = 6;
4414 		break;
4415 	case CHIP_KAVERI:
4416 		adev->mode_info.num_crtc = 4;
4417 		adev->mode_info.num_hpd = 6;
4418 		adev->mode_info.num_dig = 7;
4419 		break;
4420 	case CHIP_KABINI:
4421 	case CHIP_MULLINS:
4422 		adev->mode_info.num_crtc = 2;
4423 		adev->mode_info.num_hpd = 6;
4424 		adev->mode_info.num_dig = 6;
4425 		break;
4426 	case CHIP_FIJI:
4427 	case CHIP_TONGA:
4428 		adev->mode_info.num_crtc = 6;
4429 		adev->mode_info.num_hpd = 6;
4430 		adev->mode_info.num_dig = 7;
4431 		break;
4432 	case CHIP_CARRIZO:
4433 		adev->mode_info.num_crtc = 3;
4434 		adev->mode_info.num_hpd = 6;
4435 		adev->mode_info.num_dig = 9;
4436 		break;
4437 	case CHIP_STONEY:
4438 		adev->mode_info.num_crtc = 2;
4439 		adev->mode_info.num_hpd = 6;
4440 		adev->mode_info.num_dig = 9;
4441 		break;
4442 	case CHIP_POLARIS11:
4443 	case CHIP_POLARIS12:
4444 		adev->mode_info.num_crtc = 5;
4445 		adev->mode_info.num_hpd = 5;
4446 		adev->mode_info.num_dig = 5;
4447 		break;
4448 	case CHIP_POLARIS10:
4449 	case CHIP_VEGAM:
4450 		adev->mode_info.num_crtc = 6;
4451 		adev->mode_info.num_hpd = 6;
4452 		adev->mode_info.num_dig = 6;
4453 		break;
4454 	case CHIP_VEGA10:
4455 	case CHIP_VEGA12:
4456 	case CHIP_VEGA20:
4457 		adev->mode_info.num_crtc = 6;
4458 		adev->mode_info.num_hpd = 6;
4459 		adev->mode_info.num_dig = 6;
4460 		break;
4461 #if defined(CONFIG_DRM_AMD_DC_DCN)
4462 	case CHIP_RAVEN:
4463 	case CHIP_RENOIR:
4464 	case CHIP_VANGOGH:
4465 		adev->mode_info.num_crtc = 4;
4466 		adev->mode_info.num_hpd = 4;
4467 		adev->mode_info.num_dig = 4;
4468 		break;
4469 	case CHIP_NAVI10:
4470 	case CHIP_NAVI12:
4471 	case CHIP_SIENNA_CICHLID:
4472 	case CHIP_NAVY_FLOUNDER:
4473 		adev->mode_info.num_crtc = 6;
4474 		adev->mode_info.num_hpd = 6;
4475 		adev->mode_info.num_dig = 6;
4476 		break;
4477 	case CHIP_YELLOW_CARP:
4478 		adev->mode_info.num_crtc = 4;
4479 		adev->mode_info.num_hpd = 4;
4480 		adev->mode_info.num_dig = 4;
4481 		break;
4482 	case CHIP_NAVI14:
4483 	case CHIP_DIMGREY_CAVEFISH:
4484 		adev->mode_info.num_crtc = 5;
4485 		adev->mode_info.num_hpd = 5;
4486 		adev->mode_info.num_dig = 5;
4487 		break;
4488 	case CHIP_BEIGE_GOBY:
4489 		adev->mode_info.num_crtc = 2;
4490 		adev->mode_info.num_hpd = 2;
4491 		adev->mode_info.num_dig = 2;
4492 		break;
4493 #endif
4494 	default:
4495 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
4496 		return -EINVAL;
4497 	}
4498 
4499 	amdgpu_dm_set_irq_funcs(adev);
4500 
4501 	if (adev->mode_info.funcs == NULL)
4502 		adev->mode_info.funcs = &dm_display_funcs;
4503 
4504 	/*
4505 	 * Note: Do NOT change adev->audio_endpt_rreg and
4506 	 * adev->audio_endpt_wreg because they are initialised in
4507 	 * amdgpu_device_init()
4508 	 */
4509 #if defined(CONFIG_DEBUG_KERNEL_DC)
4510 	device_create_file(
4511 		adev_to_drm(adev)->dev,
4512 		&dev_attr_s3_debug);
4513 #endif
4514 
4515 	return 0;
4516 }
4517 
4518 static bool modeset_required(struct drm_crtc_state *crtc_state,
4519 			     struct dc_stream_state *new_stream,
4520 			     struct dc_stream_state *old_stream)
4521 {
4522 	return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4523 }
4524 
4525 static bool modereset_required(struct drm_crtc_state *crtc_state)
4526 {
4527 	return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state);
4528 }
4529 
4530 static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
4531 {
4532 	drm_encoder_cleanup(encoder);
4533 	kfree(encoder);
4534 }
4535 
4536 static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
4537 	.destroy = amdgpu_dm_encoder_destroy,
4538 };
4539 
4540 
4541 static void get_min_max_dc_plane_scaling(struct drm_device *dev,
4542 					 struct drm_framebuffer *fb,
4543 					 int *min_downscale, int *max_upscale)
4544 {
4545 	struct amdgpu_device *adev = drm_to_adev(dev);
4546 	struct dc *dc = adev->dm.dc;
4547 	/* Caps for all supported planes are the same on DCE and DCN 1 - 3 */
4548 	struct dc_plane_cap *plane_cap = &dc->caps.planes[0];
4549 
4550 	switch (fb->format->format) {
4551 	case DRM_FORMAT_P010:
4552 	case DRM_FORMAT_NV12:
4553 	case DRM_FORMAT_NV21:
4554 		*max_upscale = plane_cap->max_upscale_factor.nv12;
4555 		*min_downscale = plane_cap->max_downscale_factor.nv12;
4556 		break;
4557 
4558 	case DRM_FORMAT_XRGB16161616F:
4559 	case DRM_FORMAT_ARGB16161616F:
4560 	case DRM_FORMAT_XBGR16161616F:
4561 	case DRM_FORMAT_ABGR16161616F:
4562 		*max_upscale = plane_cap->max_upscale_factor.fp16;
4563 		*min_downscale = plane_cap->max_downscale_factor.fp16;
4564 		break;
4565 
4566 	default:
4567 		*max_upscale = plane_cap->max_upscale_factor.argb8888;
4568 		*min_downscale = plane_cap->max_downscale_factor.argb8888;
4569 		break;
4570 	}
4571 
4572 	/*
4573 	 * A factor of 1 in the plane_cap means to not allow scaling, ie. use a
4574 	 * scaling factor of 1.0 == 1000 units.
4575 	 */
4576 	if (*max_upscale == 1)
4577 		*max_upscale = 1000;
4578 
4579 	if (*min_downscale == 1)
4580 		*min_downscale = 1000;
4581 }
4582 
4583 
4584 static int fill_dc_scaling_info(const struct drm_plane_state *state,
4585 				struct dc_scaling_info *scaling_info)
4586 {
4587 	int scale_w, scale_h, min_downscale, max_upscale;
4588 
4589 	memset(scaling_info, 0, sizeof(*scaling_info));
4590 
4591 	/* Source is fixed 16.16 but we ignore mantissa for now... */
4592 	scaling_info->src_rect.x = state->src_x >> 16;
4593 	scaling_info->src_rect.y = state->src_y >> 16;
4594 
4595 	/*
4596 	 * For reasons we don't (yet) fully understand a non-zero
4597 	 * src_y coordinate into an NV12 buffer can cause a
4598 	 * system hang. To avoid hangs (and maybe be overly cautious)
4599 	 * let's reject both non-zero src_x and src_y.
4600 	 *
4601 	 * We currently know of only one use-case to reproduce a
4602 	 * scenario with non-zero src_x and src_y for NV12, which
4603 	 * is to gesture the YouTube Android app into full screen
4604 	 * on ChromeOS.
4605 	 */
4606 	if (state->fb &&
4607 	    state->fb->format->format == DRM_FORMAT_NV12 &&
4608 	    (scaling_info->src_rect.x != 0 ||
4609 	     scaling_info->src_rect.y != 0))
4610 		return -EINVAL;
4611 
4612 	scaling_info->src_rect.width = state->src_w >> 16;
4613 	if (scaling_info->src_rect.width == 0)
4614 		return -EINVAL;
4615 
4616 	scaling_info->src_rect.height = state->src_h >> 16;
4617 	if (scaling_info->src_rect.height == 0)
4618 		return -EINVAL;
4619 
4620 	scaling_info->dst_rect.x = state->crtc_x;
4621 	scaling_info->dst_rect.y = state->crtc_y;
4622 
4623 	if (state->crtc_w == 0)
4624 		return -EINVAL;
4625 
4626 	scaling_info->dst_rect.width = state->crtc_w;
4627 
4628 	if (state->crtc_h == 0)
4629 		return -EINVAL;
4630 
4631 	scaling_info->dst_rect.height = state->crtc_h;
4632 
4633 	/* DRM doesn't specify clipping on destination output. */
4634 	scaling_info->clip_rect = scaling_info->dst_rect;
4635 
4636 	/* Validate scaling per-format with DC plane caps */
4637 	if (state->plane && state->plane->dev && state->fb) {
4638 		get_min_max_dc_plane_scaling(state->plane->dev, state->fb,
4639 					     &min_downscale, &max_upscale);
4640 	} else {
4641 		min_downscale = 250;
4642 		max_upscale = 16000;
4643 	}
4644 
4645 	scale_w = scaling_info->dst_rect.width * 1000 /
4646 		  scaling_info->src_rect.width;
4647 
4648 	if (scale_w < min_downscale || scale_w > max_upscale)
4649 		return -EINVAL;
4650 
4651 	scale_h = scaling_info->dst_rect.height * 1000 /
4652 		  scaling_info->src_rect.height;
4653 
4654 	if (scale_h < min_downscale || scale_h > max_upscale)
4655 		return -EINVAL;
4656 
4657 	/*
4658 	 * The "scaling_quality" can be ignored for now, quality = 0 has DC
4659 	 * assume reasonable defaults based on the format.
4660 	 */
4661 
4662 	return 0;
4663 }
4664 
4665 static void
4666 fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info,
4667 				 uint64_t tiling_flags)
4668 {
4669 	/* Fill GFX8 params */
4670 	if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) {
4671 		unsigned int bankw, bankh, mtaspect, tile_split, num_banks;
4672 
4673 		bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH);
4674 		bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT);
4675 		mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT);
4676 		tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT);
4677 		num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS);
4678 
4679 		/* XXX fix me for VI */
4680 		tiling_info->gfx8.num_banks = num_banks;
4681 		tiling_info->gfx8.array_mode =
4682 				DC_ARRAY_2D_TILED_THIN1;
4683 		tiling_info->gfx8.tile_split = tile_split;
4684 		tiling_info->gfx8.bank_width = bankw;
4685 		tiling_info->gfx8.bank_height = bankh;
4686 		tiling_info->gfx8.tile_aspect = mtaspect;
4687 		tiling_info->gfx8.tile_mode =
4688 				DC_ADDR_SURF_MICRO_TILING_DISPLAY;
4689 	} else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE)
4690 			== DC_ARRAY_1D_TILED_THIN1) {
4691 		tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1;
4692 	}
4693 
4694 	tiling_info->gfx8.pipe_config =
4695 			AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG);
4696 }
4697 
4698 static void
4699 fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev,
4700 				  union dc_tiling_info *tiling_info)
4701 {
4702 	tiling_info->gfx9.num_pipes =
4703 		adev->gfx.config.gb_addr_config_fields.num_pipes;
4704 	tiling_info->gfx9.num_banks =
4705 		adev->gfx.config.gb_addr_config_fields.num_banks;
4706 	tiling_info->gfx9.pipe_interleave =
4707 		adev->gfx.config.gb_addr_config_fields.pipe_interleave_size;
4708 	tiling_info->gfx9.num_shader_engines =
4709 		adev->gfx.config.gb_addr_config_fields.num_se;
4710 	tiling_info->gfx9.max_compressed_frags =
4711 		adev->gfx.config.gb_addr_config_fields.max_compress_frags;
4712 	tiling_info->gfx9.num_rb_per_se =
4713 		adev->gfx.config.gb_addr_config_fields.num_rb_per_se;
4714 	tiling_info->gfx9.shaderEnable = 1;
4715 	if (adev->asic_type == CHIP_SIENNA_CICHLID ||
4716 	    adev->asic_type == CHIP_NAVY_FLOUNDER ||
4717 	    adev->asic_type == CHIP_DIMGREY_CAVEFISH ||
4718 	    adev->asic_type == CHIP_BEIGE_GOBY ||
4719 	    adev->asic_type == CHIP_YELLOW_CARP ||
4720 	    adev->asic_type == CHIP_VANGOGH)
4721 		tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs;
4722 }
4723 
4724 static int
4725 validate_dcc(struct amdgpu_device *adev,
4726 	     const enum surface_pixel_format format,
4727 	     const enum dc_rotation_angle rotation,
4728 	     const union dc_tiling_info *tiling_info,
4729 	     const struct dc_plane_dcc_param *dcc,
4730 	     const struct dc_plane_address *address,
4731 	     const struct plane_size *plane_size)
4732 {
4733 	struct dc *dc = adev->dm.dc;
4734 	struct dc_dcc_surface_param input;
4735 	struct dc_surface_dcc_cap output;
4736 
4737 	memset(&input, 0, sizeof(input));
4738 	memset(&output, 0, sizeof(output));
4739 
4740 	if (!dcc->enable)
4741 		return 0;
4742 
4743 	if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN ||
4744 	    !dc->cap_funcs.get_dcc_compression_cap)
4745 		return -EINVAL;
4746 
4747 	input.format = format;
4748 	input.surface_size.width = plane_size->surface_size.width;
4749 	input.surface_size.height = plane_size->surface_size.height;
4750 	input.swizzle_mode = tiling_info->gfx9.swizzle;
4751 
4752 	if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180)
4753 		input.scan = SCAN_DIRECTION_HORIZONTAL;
4754 	else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270)
4755 		input.scan = SCAN_DIRECTION_VERTICAL;
4756 
4757 	if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output))
4758 		return -EINVAL;
4759 
4760 	if (!output.capable)
4761 		return -EINVAL;
4762 
4763 	if (dcc->independent_64b_blks == 0 &&
4764 	    output.grph.rgb.independent_64b_blks != 0)
4765 		return -EINVAL;
4766 
4767 	return 0;
4768 }
4769 
4770 static bool
4771 modifier_has_dcc(uint64_t modifier)
4772 {
4773 	return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier);
4774 }
4775 
4776 static unsigned
4777 modifier_gfx9_swizzle_mode(uint64_t modifier)
4778 {
4779 	if (modifier == DRM_FORMAT_MOD_LINEAR)
4780 		return 0;
4781 
4782 	return AMD_FMT_MOD_GET(TILE, modifier);
4783 }
4784 
4785 static const struct drm_format_info *
4786 amd_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
4787 {
4788 	return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]);
4789 }
4790 
4791 static void
4792 fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev,
4793 				    union dc_tiling_info *tiling_info,
4794 				    uint64_t modifier)
4795 {
4796 	unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier);
4797 	unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier);
4798 	unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier);
4799 	unsigned int pipes_log2 = min(4u, mod_pipe_xor_bits);
4800 
4801 	fill_gfx9_tiling_info_from_device(adev, tiling_info);
4802 
4803 	if (!IS_AMD_FMT_MOD(modifier))
4804 		return;
4805 
4806 	tiling_info->gfx9.num_pipes = 1u << pipes_log2;
4807 	tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2);
4808 
4809 	if (adev->family >= AMDGPU_FAMILY_NV) {
4810 		tiling_info->gfx9.num_pkrs = 1u << pkrs_log2;
4811 	} else {
4812 		tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits;
4813 
4814 		/* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */
4815 	}
4816 }
4817 
4818 enum dm_micro_swizzle {
4819 	MICRO_SWIZZLE_Z = 0,
4820 	MICRO_SWIZZLE_S = 1,
4821 	MICRO_SWIZZLE_D = 2,
4822 	MICRO_SWIZZLE_R = 3
4823 };
4824 
4825 static bool dm_plane_format_mod_supported(struct drm_plane *plane,
4826 					  uint32_t format,
4827 					  uint64_t modifier)
4828 {
4829 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
4830 	const struct drm_format_info *info = drm_format_info(format);
4831 	int i;
4832 
4833 	enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3;
4834 
4835 	if (!info)
4836 		return false;
4837 
4838 	/*
4839 	 * We always have to allow these modifiers:
4840 	 * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers.
4841 	 * 2. Not passing any modifiers is the same as explicitly passing INVALID.
4842 	 */
4843 	if (modifier == DRM_FORMAT_MOD_LINEAR ||
4844 	    modifier == DRM_FORMAT_MOD_INVALID) {
4845 		return true;
4846 	}
4847 
4848 	/* Check that the modifier is on the list of the plane's supported modifiers. */
4849 	for (i = 0; i < plane->modifier_count; i++) {
4850 		if (modifier == plane->modifiers[i])
4851 			break;
4852 	}
4853 	if (i == plane->modifier_count)
4854 		return false;
4855 
4856 	/*
4857 	 * For D swizzle the canonical modifier depends on the bpp, so check
4858 	 * it here.
4859 	 */
4860 	if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 &&
4861 	    adev->family >= AMDGPU_FAMILY_NV) {
4862 		if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4)
4863 			return false;
4864 	}
4865 
4866 	if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D &&
4867 	    info->cpp[0] < 8)
4868 		return false;
4869 
4870 	if (modifier_has_dcc(modifier)) {
4871 		/* Per radeonsi comments 16/64 bpp are more complicated. */
4872 		if (info->cpp[0] != 4)
4873 			return false;
4874 		/* We support multi-planar formats, but not when combined with
4875 		 * additional DCC metadata planes. */
4876 		if (info->num_planes > 1)
4877 			return false;
4878 	}
4879 
4880 	return true;
4881 }
4882 
4883 static void
4884 add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod)
4885 {
4886 	if (!*mods)
4887 		return;
4888 
4889 	if (*cap - *size < 1) {
4890 		uint64_t new_cap = *cap * 2;
4891 		uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL);
4892 
4893 		if (!new_mods) {
4894 			kfree(*mods);
4895 			*mods = NULL;
4896 			return;
4897 		}
4898 
4899 		memcpy(new_mods, *mods, sizeof(uint64_t) * *size);
4900 		kfree(*mods);
4901 		*mods = new_mods;
4902 		*cap = new_cap;
4903 	}
4904 
4905 	(*mods)[*size] = mod;
4906 	*size += 1;
4907 }
4908 
4909 static void
4910 add_gfx9_modifiers(const struct amdgpu_device *adev,
4911 		   uint64_t **mods, uint64_t *size, uint64_t *capacity)
4912 {
4913 	int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
4914 	int pipe_xor_bits = min(8, pipes +
4915 				ilog2(adev->gfx.config.gb_addr_config_fields.num_se));
4916 	int bank_xor_bits = min(8 - pipe_xor_bits,
4917 				ilog2(adev->gfx.config.gb_addr_config_fields.num_banks));
4918 	int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) +
4919 		 ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se);
4920 
4921 
4922 	if (adev->family == AMDGPU_FAMILY_RV) {
4923 		/* Raven2 and later */
4924 		bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81;
4925 
4926 		/*
4927 		 * No _D DCC swizzles yet because we only allow 32bpp, which
4928 		 * doesn't support _D on DCN
4929 		 */
4930 
4931 		if (has_constant_encode) {
4932 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4933 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4934 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4935 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4936 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4937 				    AMD_FMT_MOD_SET(DCC, 1) |
4938 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4939 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4940 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1));
4941 		}
4942 
4943 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4944 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4945 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4946 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4947 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4948 			    AMD_FMT_MOD_SET(DCC, 1) |
4949 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4950 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4951 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0));
4952 
4953 		if (has_constant_encode) {
4954 			add_modifier(mods, size, capacity, AMD_FMT_MOD |
4955 				    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4956 				    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4957 				    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4958 				    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4959 				    AMD_FMT_MOD_SET(DCC, 1) |
4960 				    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4961 				    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4962 				    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4963 
4964 				    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
4965 				    AMD_FMT_MOD_SET(RB, rb) |
4966 				    AMD_FMT_MOD_SET(PIPE, pipes));
4967 		}
4968 
4969 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4970 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4971 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4972 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4973 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) |
4974 			    AMD_FMT_MOD_SET(DCC, 1) |
4975 			    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
4976 			    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
4977 			    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) |
4978 			    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) |
4979 			    AMD_FMT_MOD_SET(RB, rb) |
4980 			    AMD_FMT_MOD_SET(PIPE, pipes));
4981 	}
4982 
4983 	/*
4984 	 * Only supported for 64bpp on Raven, will be filtered on format in
4985 	 * dm_plane_format_mod_supported.
4986 	 */
4987 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
4988 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) |
4989 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4990 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4991 		    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4992 
4993 	if (adev->family == AMDGPU_FAMILY_RV) {
4994 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
4995 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
4996 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) |
4997 			    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
4998 			    AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits));
4999 	}
5000 
5001 	/*
5002 	 * Only supported for 64bpp on Raven, will be filtered on format in
5003 	 * dm_plane_format_mod_supported.
5004 	 */
5005 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5006 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5007 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5008 
5009 	if (adev->family == AMDGPU_FAMILY_RV) {
5010 		add_modifier(mods, size, capacity, AMD_FMT_MOD |
5011 			    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5012 			    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5013 	}
5014 }
5015 
5016 static void
5017 add_gfx10_1_modifiers(const struct amdgpu_device *adev,
5018 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5019 {
5020 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5021 
5022 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5023 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5024 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5025 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5026 		    AMD_FMT_MOD_SET(DCC, 1) |
5027 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5028 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5029 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5030 
5031 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5032 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5033 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5034 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5035 		    AMD_FMT_MOD_SET(DCC, 1) |
5036 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5037 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5038 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5039 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5040 
5041 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5042 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5043 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5044 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5045 
5046 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5047 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5048 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) |
5049 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits));
5050 
5051 
5052 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5053 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5054 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5055 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5056 
5057 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5058 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5059 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5060 }
5061 
5062 static void
5063 add_gfx10_3_modifiers(const struct amdgpu_device *adev,
5064 		      uint64_t **mods, uint64_t *size, uint64_t *capacity)
5065 {
5066 	int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes);
5067 	int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs);
5068 
5069 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5070 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5071 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5072 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5073 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5074 		    AMD_FMT_MOD_SET(DCC, 1) |
5075 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5076 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5077 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5078 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5079 
5080 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5081 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5082 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5083 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5084 		    AMD_FMT_MOD_SET(PACKERS, pkrs) |
5085 		    AMD_FMT_MOD_SET(DCC, 1) |
5086 		    AMD_FMT_MOD_SET(DCC_RETILE, 1) |
5087 		    AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) |
5088 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) |
5089 		    AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) |
5090 		    AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B));
5091 
5092 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5093 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) |
5094 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5095 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5096 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5097 
5098 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5099 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) |
5100 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) |
5101 		    AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) |
5102 		    AMD_FMT_MOD_SET(PACKERS, pkrs));
5103 
5104 	/* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */
5105 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5106 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) |
5107 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5108 
5109 	add_modifier(mods, size, capacity, AMD_FMT_MOD |
5110 		    AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) |
5111 		    AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9));
5112 }
5113 
5114 static int
5115 get_plane_modifiers(const struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods)
5116 {
5117 	uint64_t size = 0, capacity = 128;
5118 	*mods = NULL;
5119 
5120 	/* We have not hooked up any pre-GFX9 modifiers. */
5121 	if (adev->family < AMDGPU_FAMILY_AI)
5122 		return 0;
5123 
5124 	*mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL);
5125 
5126 	if (plane_type == DRM_PLANE_TYPE_CURSOR) {
5127 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5128 		add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5129 		return *mods ? 0 : -ENOMEM;
5130 	}
5131 
5132 	switch (adev->family) {
5133 	case AMDGPU_FAMILY_AI:
5134 	case AMDGPU_FAMILY_RV:
5135 		add_gfx9_modifiers(adev, mods, &size, &capacity);
5136 		break;
5137 	case AMDGPU_FAMILY_NV:
5138 	case AMDGPU_FAMILY_VGH:
5139 	case AMDGPU_FAMILY_YC:
5140 		if (adev->asic_type >= CHIP_SIENNA_CICHLID)
5141 			add_gfx10_3_modifiers(adev, mods, &size, &capacity);
5142 		else
5143 			add_gfx10_1_modifiers(adev, mods, &size, &capacity);
5144 		break;
5145 	}
5146 
5147 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR);
5148 
5149 	/* INVALID marks the end of the list. */
5150 	add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID);
5151 
5152 	if (!*mods)
5153 		return -ENOMEM;
5154 
5155 	return 0;
5156 }
5157 
5158 static int
5159 fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev,
5160 					  const struct amdgpu_framebuffer *afb,
5161 					  const enum surface_pixel_format format,
5162 					  const enum dc_rotation_angle rotation,
5163 					  const struct plane_size *plane_size,
5164 					  union dc_tiling_info *tiling_info,
5165 					  struct dc_plane_dcc_param *dcc,
5166 					  struct dc_plane_address *address,
5167 					  const bool force_disable_dcc)
5168 {
5169 	const uint64_t modifier = afb->base.modifier;
5170 	int ret = 0;
5171 
5172 	fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier);
5173 	tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier);
5174 
5175 	if (modifier_has_dcc(modifier) && !force_disable_dcc) {
5176 		uint64_t dcc_address = afb->address + afb->base.offsets[1];
5177 
5178 		dcc->enable = 1;
5179 		dcc->meta_pitch = afb->base.pitches[1];
5180 		dcc->independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier);
5181 
5182 		address->grph.meta_addr.low_part = lower_32_bits(dcc_address);
5183 		address->grph.meta_addr.high_part = upper_32_bits(dcc_address);
5184 	}
5185 
5186 	ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size);
5187 	if (ret)
5188 		drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret);
5189 
5190 	return ret;
5191 }
5192 
5193 static int
5194 fill_plane_buffer_attributes(struct amdgpu_device *adev,
5195 			     const struct amdgpu_framebuffer *afb,
5196 			     const enum surface_pixel_format format,
5197 			     const enum dc_rotation_angle rotation,
5198 			     const uint64_t tiling_flags,
5199 			     union dc_tiling_info *tiling_info,
5200 			     struct plane_size *plane_size,
5201 			     struct dc_plane_dcc_param *dcc,
5202 			     struct dc_plane_address *address,
5203 			     bool tmz_surface,
5204 			     bool force_disable_dcc)
5205 {
5206 	const struct drm_framebuffer *fb = &afb->base;
5207 	int ret;
5208 
5209 	memset(tiling_info, 0, sizeof(*tiling_info));
5210 	memset(plane_size, 0, sizeof(*plane_size));
5211 	memset(dcc, 0, sizeof(*dcc));
5212 	memset(address, 0, sizeof(*address));
5213 
5214 	address->tmz_surface = tmz_surface;
5215 
5216 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) {
5217 		uint64_t addr = afb->address + fb->offsets[0];
5218 
5219 		plane_size->surface_size.x = 0;
5220 		plane_size->surface_size.y = 0;
5221 		plane_size->surface_size.width = fb->width;
5222 		plane_size->surface_size.height = fb->height;
5223 		plane_size->surface_pitch =
5224 			fb->pitches[0] / fb->format->cpp[0];
5225 
5226 		address->type = PLN_ADDR_TYPE_GRAPHICS;
5227 		address->grph.addr.low_part = lower_32_bits(addr);
5228 		address->grph.addr.high_part = upper_32_bits(addr);
5229 	} else if (format < SURFACE_PIXEL_FORMAT_INVALID) {
5230 		uint64_t luma_addr = afb->address + fb->offsets[0];
5231 		uint64_t chroma_addr = afb->address + fb->offsets[1];
5232 
5233 		plane_size->surface_size.x = 0;
5234 		plane_size->surface_size.y = 0;
5235 		plane_size->surface_size.width = fb->width;
5236 		plane_size->surface_size.height = fb->height;
5237 		plane_size->surface_pitch =
5238 			fb->pitches[0] / fb->format->cpp[0];
5239 
5240 		plane_size->chroma_size.x = 0;
5241 		plane_size->chroma_size.y = 0;
5242 		/* TODO: set these based on surface format */
5243 		plane_size->chroma_size.width = fb->width / 2;
5244 		plane_size->chroma_size.height = fb->height / 2;
5245 
5246 		plane_size->chroma_pitch =
5247 			fb->pitches[1] / fb->format->cpp[1];
5248 
5249 		address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE;
5250 		address->video_progressive.luma_addr.low_part =
5251 			lower_32_bits(luma_addr);
5252 		address->video_progressive.luma_addr.high_part =
5253 			upper_32_bits(luma_addr);
5254 		address->video_progressive.chroma_addr.low_part =
5255 			lower_32_bits(chroma_addr);
5256 		address->video_progressive.chroma_addr.high_part =
5257 			upper_32_bits(chroma_addr);
5258 	}
5259 
5260 	if (adev->family >= AMDGPU_FAMILY_AI) {
5261 		ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format,
5262 								rotation, plane_size,
5263 								tiling_info, dcc,
5264 								address,
5265 								force_disable_dcc);
5266 		if (ret)
5267 			return ret;
5268 	} else {
5269 		fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags);
5270 	}
5271 
5272 	return 0;
5273 }
5274 
5275 static void
5276 fill_blending_from_plane_state(const struct drm_plane_state *plane_state,
5277 			       bool *per_pixel_alpha, bool *global_alpha,
5278 			       int *global_alpha_value)
5279 {
5280 	*per_pixel_alpha = false;
5281 	*global_alpha = false;
5282 	*global_alpha_value = 0xff;
5283 
5284 	if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY)
5285 		return;
5286 
5287 	if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) {
5288 		static const uint32_t alpha_formats[] = {
5289 			DRM_FORMAT_ARGB8888,
5290 			DRM_FORMAT_RGBA8888,
5291 			DRM_FORMAT_ABGR8888,
5292 		};
5293 		uint32_t format = plane_state->fb->format->format;
5294 		unsigned int i;
5295 
5296 		for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) {
5297 			if (format == alpha_formats[i]) {
5298 				*per_pixel_alpha = true;
5299 				break;
5300 			}
5301 		}
5302 	}
5303 
5304 	if (plane_state->alpha < 0xffff) {
5305 		*global_alpha = true;
5306 		*global_alpha_value = plane_state->alpha >> 8;
5307 	}
5308 }
5309 
5310 static int
5311 fill_plane_color_attributes(const struct drm_plane_state *plane_state,
5312 			    const enum surface_pixel_format format,
5313 			    enum dc_color_space *color_space)
5314 {
5315 	bool full_range;
5316 
5317 	*color_space = COLOR_SPACE_SRGB;
5318 
5319 	/* DRM color properties only affect non-RGB formats. */
5320 	if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN)
5321 		return 0;
5322 
5323 	full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE);
5324 
5325 	switch (plane_state->color_encoding) {
5326 	case DRM_COLOR_YCBCR_BT601:
5327 		if (full_range)
5328 			*color_space = COLOR_SPACE_YCBCR601;
5329 		else
5330 			*color_space = COLOR_SPACE_YCBCR601_LIMITED;
5331 		break;
5332 
5333 	case DRM_COLOR_YCBCR_BT709:
5334 		if (full_range)
5335 			*color_space = COLOR_SPACE_YCBCR709;
5336 		else
5337 			*color_space = COLOR_SPACE_YCBCR709_LIMITED;
5338 		break;
5339 
5340 	case DRM_COLOR_YCBCR_BT2020:
5341 		if (full_range)
5342 			*color_space = COLOR_SPACE_2020_YCBCR;
5343 		else
5344 			return -EINVAL;
5345 		break;
5346 
5347 	default:
5348 		return -EINVAL;
5349 	}
5350 
5351 	return 0;
5352 }
5353 
5354 static int
5355 fill_dc_plane_info_and_addr(struct amdgpu_device *adev,
5356 			    const struct drm_plane_state *plane_state,
5357 			    const uint64_t tiling_flags,
5358 			    struct dc_plane_info *plane_info,
5359 			    struct dc_plane_address *address,
5360 			    bool tmz_surface,
5361 			    bool force_disable_dcc)
5362 {
5363 	const struct drm_framebuffer *fb = plane_state->fb;
5364 	const struct amdgpu_framebuffer *afb =
5365 		to_amdgpu_framebuffer(plane_state->fb);
5366 	int ret;
5367 
5368 	memset(plane_info, 0, sizeof(*plane_info));
5369 
5370 	switch (fb->format->format) {
5371 	case DRM_FORMAT_C8:
5372 		plane_info->format =
5373 			SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS;
5374 		break;
5375 	case DRM_FORMAT_RGB565:
5376 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565;
5377 		break;
5378 	case DRM_FORMAT_XRGB8888:
5379 	case DRM_FORMAT_ARGB8888:
5380 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888;
5381 		break;
5382 	case DRM_FORMAT_XRGB2101010:
5383 	case DRM_FORMAT_ARGB2101010:
5384 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010;
5385 		break;
5386 	case DRM_FORMAT_XBGR2101010:
5387 	case DRM_FORMAT_ABGR2101010:
5388 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010;
5389 		break;
5390 	case DRM_FORMAT_XBGR8888:
5391 	case DRM_FORMAT_ABGR8888:
5392 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888;
5393 		break;
5394 	case DRM_FORMAT_NV21:
5395 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr;
5396 		break;
5397 	case DRM_FORMAT_NV12:
5398 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb;
5399 		break;
5400 	case DRM_FORMAT_P010:
5401 		plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb;
5402 		break;
5403 	case DRM_FORMAT_XRGB16161616F:
5404 	case DRM_FORMAT_ARGB16161616F:
5405 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F;
5406 		break;
5407 	case DRM_FORMAT_XBGR16161616F:
5408 	case DRM_FORMAT_ABGR16161616F:
5409 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F;
5410 		break;
5411 	case DRM_FORMAT_XRGB16161616:
5412 	case DRM_FORMAT_ARGB16161616:
5413 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616;
5414 		break;
5415 	case DRM_FORMAT_XBGR16161616:
5416 	case DRM_FORMAT_ABGR16161616:
5417 		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
5418 		break;
5419 	default:
5420 		DRM_ERROR(
5421 			"Unsupported screen format %p4cc\n",
5422 			&fb->format->format);
5423 		return -EINVAL;
5424 	}
5425 
5426 	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
5427 	case DRM_MODE_ROTATE_0:
5428 		plane_info->rotation = ROTATION_ANGLE_0;
5429 		break;
5430 	case DRM_MODE_ROTATE_90:
5431 		plane_info->rotation = ROTATION_ANGLE_90;
5432 		break;
5433 	case DRM_MODE_ROTATE_180:
5434 		plane_info->rotation = ROTATION_ANGLE_180;
5435 		break;
5436 	case DRM_MODE_ROTATE_270:
5437 		plane_info->rotation = ROTATION_ANGLE_270;
5438 		break;
5439 	default:
5440 		plane_info->rotation = ROTATION_ANGLE_0;
5441 		break;
5442 	}
5443 
5444 	plane_info->visible = true;
5445 	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
5446 
5447 	plane_info->layer_index = 0;
5448 
5449 	ret = fill_plane_color_attributes(plane_state, plane_info->format,
5450 					  &plane_info->color_space);
5451 	if (ret)
5452 		return ret;
5453 
5454 	ret = fill_plane_buffer_attributes(adev, afb, plane_info->format,
5455 					   plane_info->rotation, tiling_flags,
5456 					   &plane_info->tiling_info,
5457 					   &plane_info->plane_size,
5458 					   &plane_info->dcc, address, tmz_surface,
5459 					   force_disable_dcc);
5460 	if (ret)
5461 		return ret;
5462 
5463 	fill_blending_from_plane_state(
5464 		plane_state, &plane_info->per_pixel_alpha,
5465 		&plane_info->global_alpha, &plane_info->global_alpha_value);
5466 
5467 	return 0;
5468 }
5469 
5470 static int fill_dc_plane_attributes(struct amdgpu_device *adev,
5471 				    struct dc_plane_state *dc_plane_state,
5472 				    struct drm_plane_state *plane_state,
5473 				    struct drm_crtc_state *crtc_state)
5474 {
5475 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
5476 	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
5477 	struct dc_scaling_info scaling_info;
5478 	struct dc_plane_info plane_info;
5479 	int ret;
5480 	bool force_disable_dcc = false;
5481 
5482 	ret = fill_dc_scaling_info(plane_state, &scaling_info);
5483 	if (ret)
5484 		return ret;
5485 
5486 	dc_plane_state->src_rect = scaling_info.src_rect;
5487 	dc_plane_state->dst_rect = scaling_info.dst_rect;
5488 	dc_plane_state->clip_rect = scaling_info.clip_rect;
5489 	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
5490 
5491 	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
5492 	ret = fill_dc_plane_info_and_addr(adev, plane_state,
5493 					  afb->tiling_flags,
5494 					  &plane_info,
5495 					  &dc_plane_state->address,
5496 					  afb->tmz_surface,
5497 					  force_disable_dcc);
5498 	if (ret)
5499 		return ret;
5500 
5501 	dc_plane_state->format = plane_info.format;
5502 	dc_plane_state->color_space = plane_info.color_space;
5503 	dc_plane_state->format = plane_info.format;
5504 	dc_plane_state->plane_size = plane_info.plane_size;
5505 	dc_plane_state->rotation = plane_info.rotation;
5506 	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
5507 	dc_plane_state->stereo_format = plane_info.stereo_format;
5508 	dc_plane_state->tiling_info = plane_info.tiling_info;
5509 	dc_plane_state->visible = plane_info.visible;
5510 	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
5511 	dc_plane_state->global_alpha = plane_info.global_alpha;
5512 	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
5513 	dc_plane_state->dcc = plane_info.dcc;
5514 	dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0
5515 	dc_plane_state->flip_int_enabled = true;
5516 
5517 	/*
5518 	 * Always set input transfer function, since plane state is refreshed
5519 	 * every time.
5520 	 */
5521 	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
5522 	if (ret)
5523 		return ret;
5524 
5525 	return 0;
5526 }
5527 
5528 static void update_stream_scaling_settings(const struct drm_display_mode *mode,
5529 					   const struct dm_connector_state *dm_state,
5530 					   struct dc_stream_state *stream)
5531 {
5532 	enum amdgpu_rmx_type rmx_type;
5533 
5534 	struct rect src = { 0 }; /* viewport in composition space*/
5535 	struct rect dst = { 0 }; /* stream addressable area */
5536 
5537 	/* no mode. nothing to be done */
5538 	if (!mode)
5539 		return;
5540 
5541 	/* Full screen scaling by default */
5542 	src.width = mode->hdisplay;
5543 	src.height = mode->vdisplay;
5544 	dst.width = stream->timing.h_addressable;
5545 	dst.height = stream->timing.v_addressable;
5546 
5547 	if (dm_state) {
5548 		rmx_type = dm_state->scaling;
5549 		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
5550 			if (src.width * dst.height <
5551 					src.height * dst.width) {
5552 				/* height needs less upscaling/more downscaling */
5553 				dst.width = src.width *
5554 						dst.height / src.height;
5555 			} else {
5556 				/* width needs less upscaling/more downscaling */
5557 				dst.height = src.height *
5558 						dst.width / src.width;
5559 			}
5560 		} else if (rmx_type == RMX_CENTER) {
5561 			dst = src;
5562 		}
5563 
5564 		dst.x = (stream->timing.h_addressable - dst.width) / 2;
5565 		dst.y = (stream->timing.v_addressable - dst.height) / 2;
5566 
5567 		if (dm_state->underscan_enable) {
5568 			dst.x += dm_state->underscan_hborder / 2;
5569 			dst.y += dm_state->underscan_vborder / 2;
5570 			dst.width -= dm_state->underscan_hborder;
5571 			dst.height -= dm_state->underscan_vborder;
5572 		}
5573 	}
5574 
5575 	stream->src = src;
5576 	stream->dst = dst;
5577 
5578 	DRM_DEBUG_KMS("Destination Rectangle x:%d  y:%d  width:%d  height:%d\n",
5579 		      dst.x, dst.y, dst.width, dst.height);
5580 
5581 }
5582 
5583 static enum dc_color_depth
5584 convert_color_depth_from_display_info(const struct drm_connector *connector,
5585 				      bool is_y420, int requested_bpc)
5586 {
5587 	uint8_t bpc;
5588 
5589 	if (is_y420) {
5590 		bpc = 8;
5591 
5592 		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
5593 		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
5594 			bpc = 16;
5595 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
5596 			bpc = 12;
5597 		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
5598 			bpc = 10;
5599 	} else {
5600 		bpc = (uint8_t)connector->display_info.bpc;
5601 		/* Assume 8 bpc by default if no bpc is specified. */
5602 		bpc = bpc ? bpc : 8;
5603 	}
5604 
5605 	if (requested_bpc > 0) {
5606 		/*
5607 		 * Cap display bpc based on the user requested value.
5608 		 *
5609 		 * The value for state->max_bpc may not correctly updated
5610 		 * depending on when the connector gets added to the state
5611 		 * or if this was called outside of atomic check, so it
5612 		 * can't be used directly.
5613 		 */
5614 		bpc = min_t(u8, bpc, requested_bpc);
5615 
5616 		/* Round down to the nearest even number. */
5617 		bpc = bpc - (bpc & 1);
5618 	}
5619 
5620 	switch (bpc) {
5621 	case 0:
5622 		/*
5623 		 * Temporary Work around, DRM doesn't parse color depth for
5624 		 * EDID revision before 1.4
5625 		 * TODO: Fix edid parsing
5626 		 */
5627 		return COLOR_DEPTH_888;
5628 	case 6:
5629 		return COLOR_DEPTH_666;
5630 	case 8:
5631 		return COLOR_DEPTH_888;
5632 	case 10:
5633 		return COLOR_DEPTH_101010;
5634 	case 12:
5635 		return COLOR_DEPTH_121212;
5636 	case 14:
5637 		return COLOR_DEPTH_141414;
5638 	case 16:
5639 		return COLOR_DEPTH_161616;
5640 	default:
5641 		return COLOR_DEPTH_UNDEFINED;
5642 	}
5643 }
5644 
5645 static enum dc_aspect_ratio
5646 get_aspect_ratio(const struct drm_display_mode *mode_in)
5647 {
5648 	/* 1-1 mapping, since both enums follow the HDMI spec. */
5649 	return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
5650 }
5651 
5652 static enum dc_color_space
5653 get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
5654 {
5655 	enum dc_color_space color_space = COLOR_SPACE_SRGB;
5656 
5657 	switch (dc_crtc_timing->pixel_encoding)	{
5658 	case PIXEL_ENCODING_YCBCR422:
5659 	case PIXEL_ENCODING_YCBCR444:
5660 	case PIXEL_ENCODING_YCBCR420:
5661 	{
5662 		/*
5663 		 * 27030khz is the separation point between HDTV and SDTV
5664 		 * according to HDMI spec, we use YCbCr709 and YCbCr601
5665 		 * respectively
5666 		 */
5667 		if (dc_crtc_timing->pix_clk_100hz > 270300) {
5668 			if (dc_crtc_timing->flags.Y_ONLY)
5669 				color_space =
5670 					COLOR_SPACE_YCBCR709_LIMITED;
5671 			else
5672 				color_space = COLOR_SPACE_YCBCR709;
5673 		} else {
5674 			if (dc_crtc_timing->flags.Y_ONLY)
5675 				color_space =
5676 					COLOR_SPACE_YCBCR601_LIMITED;
5677 			else
5678 				color_space = COLOR_SPACE_YCBCR601;
5679 		}
5680 
5681 	}
5682 	break;
5683 	case PIXEL_ENCODING_RGB:
5684 		color_space = COLOR_SPACE_SRGB;
5685 		break;
5686 
5687 	default:
5688 		WARN_ON(1);
5689 		break;
5690 	}
5691 
5692 	return color_space;
5693 }
5694 
5695 static bool adjust_colour_depth_from_display_info(
5696 	struct dc_crtc_timing *timing_out,
5697 	const struct drm_display_info *info)
5698 {
5699 	enum dc_color_depth depth = timing_out->display_color_depth;
5700 	int normalized_clk;
5701 	do {
5702 		normalized_clk = timing_out->pix_clk_100hz / 10;
5703 		/* YCbCr 4:2:0 requires additional adjustment of 1/2 */
5704 		if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
5705 			normalized_clk /= 2;
5706 		/* Adjusting pix clock following on HDMI spec based on colour depth */
5707 		switch (depth) {
5708 		case COLOR_DEPTH_888:
5709 			break;
5710 		case COLOR_DEPTH_101010:
5711 			normalized_clk = (normalized_clk * 30) / 24;
5712 			break;
5713 		case COLOR_DEPTH_121212:
5714 			normalized_clk = (normalized_clk * 36) / 24;
5715 			break;
5716 		case COLOR_DEPTH_161616:
5717 			normalized_clk = (normalized_clk * 48) / 24;
5718 			break;
5719 		default:
5720 			/* The above depths are the only ones valid for HDMI. */
5721 			return false;
5722 		}
5723 		if (normalized_clk <= info->max_tmds_clock) {
5724 			timing_out->display_color_depth = depth;
5725 			return true;
5726 		}
5727 	} while (--depth > COLOR_DEPTH_666);
5728 	return false;
5729 }
5730 
5731 static void fill_stream_properties_from_drm_display_mode(
5732 	struct dc_stream_state *stream,
5733 	const struct drm_display_mode *mode_in,
5734 	const struct drm_connector *connector,
5735 	const struct drm_connector_state *connector_state,
5736 	const struct dc_stream_state *old_stream,
5737 	int requested_bpc)
5738 {
5739 	struct dc_crtc_timing *timing_out = &stream->timing;
5740 	const struct drm_display_info *info = &connector->display_info;
5741 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
5742 	struct hdmi_vendor_infoframe hv_frame;
5743 	struct hdmi_avi_infoframe avi_frame;
5744 
5745 	memset(&hv_frame, 0, sizeof(hv_frame));
5746 	memset(&avi_frame, 0, sizeof(avi_frame));
5747 
5748 	timing_out->h_border_left = 0;
5749 	timing_out->h_border_right = 0;
5750 	timing_out->v_border_top = 0;
5751 	timing_out->v_border_bottom = 0;
5752 	/* TODO: un-hardcode */
5753 	if (drm_mode_is_420_only(info, mode_in)
5754 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5755 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5756 	else if (drm_mode_is_420_also(info, mode_in)
5757 			&& aconnector->force_yuv420_output)
5758 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5759 	else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
5760 			&& stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
5761 		timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
5762 	else
5763 		timing_out->pixel_encoding = PIXEL_ENCODING_RGB;
5764 
5765 	timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE;
5766 	timing_out->display_color_depth = convert_color_depth_from_display_info(
5767 		connector,
5768 		(timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420),
5769 		requested_bpc);
5770 	timing_out->scan_type = SCANNING_TYPE_NODATA;
5771 	timing_out->hdmi_vic = 0;
5772 
5773 	if(old_stream) {
5774 		timing_out->vic = old_stream->timing.vic;
5775 		timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY;
5776 		timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY;
5777 	} else {
5778 		timing_out->vic = drm_match_cea_mode(mode_in);
5779 		if (mode_in->flags & DRM_MODE_FLAG_PHSYNC)
5780 			timing_out->flags.HSYNC_POSITIVE_POLARITY = 1;
5781 		if (mode_in->flags & DRM_MODE_FLAG_PVSYNC)
5782 			timing_out->flags.VSYNC_POSITIVE_POLARITY = 1;
5783 	}
5784 
5785 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5786 		drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in);
5787 		timing_out->vic = avi_frame.video_code;
5788 		drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in);
5789 		timing_out->hdmi_vic = hv_frame.vic;
5790 	}
5791 
5792 	if (is_freesync_video_mode(mode_in, aconnector)) {
5793 		timing_out->h_addressable = mode_in->hdisplay;
5794 		timing_out->h_total = mode_in->htotal;
5795 		timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start;
5796 		timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay;
5797 		timing_out->v_total = mode_in->vtotal;
5798 		timing_out->v_addressable = mode_in->vdisplay;
5799 		timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay;
5800 		timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start;
5801 		timing_out->pix_clk_100hz = mode_in->clock * 10;
5802 	} else {
5803 		timing_out->h_addressable = mode_in->crtc_hdisplay;
5804 		timing_out->h_total = mode_in->crtc_htotal;
5805 		timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start;
5806 		timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay;
5807 		timing_out->v_total = mode_in->crtc_vtotal;
5808 		timing_out->v_addressable = mode_in->crtc_vdisplay;
5809 		timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay;
5810 		timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start;
5811 		timing_out->pix_clk_100hz = mode_in->crtc_clock * 10;
5812 	}
5813 
5814 	timing_out->aspect_ratio = get_aspect_ratio(mode_in);
5815 
5816 	stream->output_color_space = get_output_color_space(timing_out);
5817 
5818 	stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
5819 	stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
5820 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
5821 		if (!adjust_colour_depth_from_display_info(timing_out, info) &&
5822 		    drm_mode_is_420_also(info, mode_in) &&
5823 		    timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) {
5824 			timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
5825 			adjust_colour_depth_from_display_info(timing_out, info);
5826 		}
5827 	}
5828 }
5829 
5830 static void fill_audio_info(struct audio_info *audio_info,
5831 			    const struct drm_connector *drm_connector,
5832 			    const struct dc_sink *dc_sink)
5833 {
5834 	int i = 0;
5835 	int cea_revision = 0;
5836 	const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps;
5837 
5838 	audio_info->manufacture_id = edid_caps->manufacturer_id;
5839 	audio_info->product_id = edid_caps->product_id;
5840 
5841 	cea_revision = drm_connector->display_info.cea_rev;
5842 
5843 #ifdef __linux__
5844 	strscpy(audio_info->display_name,
5845 		edid_caps->display_name,
5846 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
5847 #else
5848 	strncpy(audio_info->display_name,
5849 		edid_caps->display_name,
5850 		AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS - 1);
5851 #endif
5852 
5853 	if (cea_revision >= 3) {
5854 		audio_info->mode_count = edid_caps->audio_mode_count;
5855 
5856 		for (i = 0; i < audio_info->mode_count; ++i) {
5857 			audio_info->modes[i].format_code =
5858 					(enum audio_format_code)
5859 					(edid_caps->audio_modes[i].format_code);
5860 			audio_info->modes[i].channel_count =
5861 					edid_caps->audio_modes[i].channel_count;
5862 			audio_info->modes[i].sample_rates.all =
5863 					edid_caps->audio_modes[i].sample_rate;
5864 			audio_info->modes[i].sample_size =
5865 					edid_caps->audio_modes[i].sample_size;
5866 		}
5867 	}
5868 
5869 	audio_info->flags.all = edid_caps->speaker_flags;
5870 
5871 	/* TODO: We only check for the progressive mode, check for interlace mode too */
5872 	if (drm_connector->latency_present[0]) {
5873 		audio_info->video_latency = drm_connector->video_latency[0];
5874 		audio_info->audio_latency = drm_connector->audio_latency[0];
5875 	}
5876 
5877 	/* TODO: For DP, video and audio latency should be calculated from DPCD caps */
5878 
5879 }
5880 
5881 static void
5882 copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode,
5883 				      struct drm_display_mode *dst_mode)
5884 {
5885 	dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay;
5886 	dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay;
5887 	dst_mode->crtc_clock = src_mode->crtc_clock;
5888 	dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start;
5889 	dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end;
5890 	dst_mode->crtc_hsync_start =  src_mode->crtc_hsync_start;
5891 	dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end;
5892 	dst_mode->crtc_htotal = src_mode->crtc_htotal;
5893 	dst_mode->crtc_hskew = src_mode->crtc_hskew;
5894 	dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start;
5895 	dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end;
5896 	dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start;
5897 	dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end;
5898 	dst_mode->crtc_vtotal = src_mode->crtc_vtotal;
5899 }
5900 
5901 static void
5902 decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode,
5903 					const struct drm_display_mode *native_mode,
5904 					bool scale_enabled)
5905 {
5906 	if (scale_enabled) {
5907 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5908 	} else if (native_mode->clock == drm_mode->clock &&
5909 			native_mode->htotal == drm_mode->htotal &&
5910 			native_mode->vtotal == drm_mode->vtotal) {
5911 		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
5912 	} else {
5913 		/* no scaling nor amdgpu inserted, no need to patch */
5914 	}
5915 }
5916 
5917 static struct dc_sink *
5918 create_fake_sink(struct amdgpu_dm_connector *aconnector)
5919 {
5920 	struct dc_sink_init_data sink_init_data = { 0 };
5921 	struct dc_sink *sink = NULL;
5922 	sink_init_data.link = aconnector->dc_link;
5923 	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
5924 
5925 	sink = dc_sink_create(&sink_init_data);
5926 	if (!sink) {
5927 		DRM_ERROR("Failed to create sink!\n");
5928 		return NULL;
5929 	}
5930 	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
5931 
5932 	return sink;
5933 }
5934 
5935 static void set_multisync_trigger_params(
5936 		struct dc_stream_state *stream)
5937 {
5938 	struct dc_stream_state *master = NULL;
5939 
5940 	if (stream->triggered_crtc_reset.enabled) {
5941 		master = stream->triggered_crtc_reset.event_source;
5942 		stream->triggered_crtc_reset.event =
5943 			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
5944 			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
5945 		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
5946 	}
5947 }
5948 
5949 static void set_master_stream(struct dc_stream_state *stream_set[],
5950 			      int stream_count)
5951 {
5952 	int j, highest_rfr = 0, master_stream = 0;
5953 
5954 	for (j = 0;  j < stream_count; j++) {
5955 		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
5956 			int refresh_rate = 0;
5957 
5958 			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
5959 				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
5960 			if (refresh_rate > highest_rfr) {
5961 				highest_rfr = refresh_rate;
5962 				master_stream = j;
5963 			}
5964 		}
5965 	}
5966 	for (j = 0;  j < stream_count; j++) {
5967 		if (stream_set[j])
5968 			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
5969 	}
5970 }
5971 
5972 static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
5973 {
5974 	int i = 0;
5975 	struct dc_stream_state *stream;
5976 
5977 	if (context->stream_count < 2)
5978 		return;
5979 	for (i = 0; i < context->stream_count ; i++) {
5980 		if (!context->streams[i])
5981 			continue;
5982 		/*
5983 		 * TODO: add a function to read AMD VSDB bits and set
5984 		 * crtc_sync_master.multi_sync_enabled flag
5985 		 * For now it's set to false
5986 		 */
5987 	}
5988 
5989 	set_master_stream(context->streams, context->stream_count);
5990 
5991 	for (i = 0; i < context->stream_count ; i++) {
5992 		stream = context->streams[i];
5993 
5994 		if (!stream)
5995 			continue;
5996 
5997 		set_multisync_trigger_params(stream);
5998 	}
5999 }
6000 
6001 #if defined(CONFIG_DRM_AMD_DC_DCN)
6002 static void update_dsc_caps(struct amdgpu_dm_connector *aconnector,
6003 							struct dc_sink *sink, struct dc_stream_state *stream,
6004 							struct dsc_dec_dpcd_caps *dsc_caps)
6005 {
6006 	stream->timing.flags.DSC = 0;
6007 	dsc_caps->is_dsc_supported = false;
6008 
6009 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6010 		dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
6011 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw,
6012 				      aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw,
6013 				      dsc_caps);
6014 	}
6015 }
6016 
6017 static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
6018 										struct dc_sink *sink, struct dc_stream_state *stream,
6019 										struct dsc_dec_dpcd_caps *dsc_caps)
6020 {
6021 	struct drm_connector *drm_connector = &aconnector->base;
6022 	uint32_t link_bandwidth_kbps;
6023 
6024 	link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
6025 							dc_link_get_link_cap(aconnector->dc_link));
6026 	/* Set DSC policy according to dsc_clock_en */
6027 	dc_dsc_policy_set_enable_dsc_when_not_needed(
6028 		aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE);
6029 
6030 	if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6031 
6032 		if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0],
6033 						dsc_caps,
6034 						aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override,
6035 						0,
6036 						link_bandwidth_kbps,
6037 						&stream->timing,
6038 						&stream->timing.dsc_cfg)) {
6039 			stream->timing.flags.DSC = 1;
6040 			DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
6041 		}
6042 	}
6043 
6044 	/* Overwrite the stream flag if DSC is enabled through debugfs */
6045 	if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE)
6046 		stream->timing.flags.DSC = 1;
6047 
6048 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h)
6049 		stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h;
6050 
6051 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v)
6052 		stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v;
6053 
6054 	if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel)
6055 		stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel;
6056 }
6057 #endif
6058 
6059 /**
6060  * DOC: FreeSync Video
6061  *
6062  * When a userspace application wants to play a video, the content follows a
6063  * standard format definition that usually specifies the FPS for that format.
6064  * The below list illustrates some video format and the expected FPS,
6065  * respectively:
6066  *
6067  * - TV/NTSC (23.976 FPS)
6068  * - Cinema (24 FPS)
6069  * - TV/PAL (25 FPS)
6070  * - TV/NTSC (29.97 FPS)
6071  * - TV/NTSC (30 FPS)
6072  * - Cinema HFR (48 FPS)
6073  * - TV/PAL (50 FPS)
6074  * - Commonly used (60 FPS)
6075  * - Multiples of 24 (48,72,96 FPS)
6076  *
6077  * The list of standards video format is not huge and can be added to the
6078  * connector modeset list beforehand. With that, userspace can leverage
6079  * FreeSync to extends the front porch in order to attain the target refresh
6080  * rate. Such a switch will happen seamlessly, without screen blanking or
6081  * reprogramming of the output in any other way. If the userspace requests a
6082  * modesetting change compatible with FreeSync modes that only differ in the
6083  * refresh rate, DC will skip the full update and avoid blink during the
6084  * transition. For example, the video player can change the modesetting from
6085  * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without
6086  * causing any display blink. This same concept can be applied to a mode
6087  * setting change.
6088  */
6089 static struct drm_display_mode *
6090 get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
6091 			  bool use_probed_modes)
6092 {
6093 	struct drm_display_mode *m, *m_pref = NULL;
6094 	u16 current_refresh, highest_refresh;
6095 	struct list_head *list_head = use_probed_modes ?
6096 						    &aconnector->base.probed_modes :
6097 						    &aconnector->base.modes;
6098 
6099 	if (aconnector->freesync_vid_base.clock != 0)
6100 		return &aconnector->freesync_vid_base;
6101 
6102 	/* Find the preferred mode */
6103 	list_for_each_entry (m, list_head, head) {
6104 		if (m->type & DRM_MODE_TYPE_PREFERRED) {
6105 			m_pref = m;
6106 			break;
6107 		}
6108 	}
6109 
6110 	if (!m_pref) {
6111 		/* Probably an EDID with no preferred mode. Fallback to first entry */
6112 		m_pref = list_first_entry_or_null(
6113 			&aconnector->base.modes, struct drm_display_mode, head);
6114 		if (!m_pref) {
6115 			DRM_DEBUG_DRIVER("No preferred mode found in EDID\n");
6116 			return NULL;
6117 		}
6118 	}
6119 
6120 	highest_refresh = drm_mode_vrefresh(m_pref);
6121 
6122 	/*
6123 	 * Find the mode with highest refresh rate with same resolution.
6124 	 * For some monitors, preferred mode is not the mode with highest
6125 	 * supported refresh rate.
6126 	 */
6127 	list_for_each_entry (m, list_head, head) {
6128 		current_refresh  = drm_mode_vrefresh(m);
6129 
6130 		if (m->hdisplay == m_pref->hdisplay &&
6131 		    m->vdisplay == m_pref->vdisplay &&
6132 		    highest_refresh < current_refresh) {
6133 			highest_refresh = current_refresh;
6134 			m_pref = m;
6135 		}
6136 	}
6137 
6138 	aconnector->freesync_vid_base = *m_pref;
6139 	return m_pref;
6140 }
6141 
6142 static bool is_freesync_video_mode(const struct drm_display_mode *mode,
6143 				   struct amdgpu_dm_connector *aconnector)
6144 {
6145 	struct drm_display_mode *high_mode;
6146 	int timing_diff;
6147 
6148 	high_mode = get_highest_refresh_rate_mode(aconnector, false);
6149 	if (!high_mode || !mode)
6150 		return false;
6151 
6152 	timing_diff = high_mode->vtotal - mode->vtotal;
6153 
6154 	if (high_mode->clock == 0 || high_mode->clock != mode->clock ||
6155 	    high_mode->hdisplay != mode->hdisplay ||
6156 	    high_mode->vdisplay != mode->vdisplay ||
6157 	    high_mode->hsync_start != mode->hsync_start ||
6158 	    high_mode->hsync_end != mode->hsync_end ||
6159 	    high_mode->htotal != mode->htotal ||
6160 	    high_mode->hskew != mode->hskew ||
6161 	    high_mode->vscan != mode->vscan ||
6162 	    high_mode->vsync_start - mode->vsync_start != timing_diff ||
6163 	    high_mode->vsync_end - mode->vsync_end != timing_diff)
6164 		return false;
6165 	else
6166 		return true;
6167 }
6168 
6169 static struct dc_stream_state *
6170 create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6171 		       const struct drm_display_mode *drm_mode,
6172 		       const struct dm_connector_state *dm_state,
6173 		       const struct dc_stream_state *old_stream,
6174 		       int requested_bpc)
6175 {
6176 	struct drm_display_mode *preferred_mode = NULL;
6177 	struct drm_connector *drm_connector;
6178 	const struct drm_connector_state *con_state =
6179 		dm_state ? &dm_state->base : NULL;
6180 	struct dc_stream_state *stream = NULL;
6181 	struct drm_display_mode mode = *drm_mode;
6182 	struct drm_display_mode saved_mode;
6183 	struct drm_display_mode *freesync_mode = NULL;
6184 	bool native_mode_found = false;
6185 	bool recalculate_timing = false;
6186 	bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
6187 	int mode_refresh;
6188 	int preferred_refresh = 0;
6189 #if defined(CONFIG_DRM_AMD_DC_DCN)
6190 	struct dsc_dec_dpcd_caps dsc_caps;
6191 #endif
6192 	struct dc_sink *sink = NULL;
6193 
6194 	memset(&saved_mode, 0, sizeof(saved_mode));
6195 
6196 	if (aconnector == NULL) {
6197 		DRM_ERROR("aconnector is NULL!\n");
6198 		return stream;
6199 	}
6200 
6201 	drm_connector = &aconnector->base;
6202 
6203 	if (!aconnector->dc_sink) {
6204 		sink = create_fake_sink(aconnector);
6205 		if (!sink)
6206 			return stream;
6207 	} else {
6208 		sink = aconnector->dc_sink;
6209 		dc_sink_retain(sink);
6210 	}
6211 
6212 	stream = dc_create_stream_for_sink(sink);
6213 
6214 	if (stream == NULL) {
6215 		DRM_ERROR("Failed to create stream for sink!\n");
6216 		goto finish;
6217 	}
6218 
6219 	stream->dm_stream_context = aconnector;
6220 
6221 	stream->timing.flags.LTE_340MCSC_SCRAMBLE =
6222 		drm_connector->display_info.hdmi.scdc.scrambling.low_rates;
6223 
6224 	list_for_each_entry(preferred_mode, &aconnector->base.modes, head) {
6225 		/* Search for preferred mode */
6226 		if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) {
6227 			native_mode_found = true;
6228 			break;
6229 		}
6230 	}
6231 	if (!native_mode_found)
6232 		preferred_mode = list_first_entry_or_null(
6233 				&aconnector->base.modes,
6234 				struct drm_display_mode,
6235 				head);
6236 
6237 	mode_refresh = drm_mode_vrefresh(&mode);
6238 
6239 	if (preferred_mode == NULL) {
6240 		/*
6241 		 * This may not be an error, the use case is when we have no
6242 		 * usermode calls to reset and set mode upon hotplug. In this
6243 		 * case, we call set mode ourselves to restore the previous mode
6244 		 * and the modelist may not be filled in in time.
6245 		 */
6246 		DRM_DEBUG_DRIVER("No preferred mode found\n");
6247 	} else {
6248 		recalculate_timing = amdgpu_freesync_vid_mode &&
6249 				 is_freesync_video_mode(&mode, aconnector);
6250 		if (recalculate_timing) {
6251 			freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
6252 			saved_mode = mode;
6253 			mode = *freesync_mode;
6254 		} else {
6255 			decide_crtc_timing_for_drm_display_mode(
6256 				&mode, preferred_mode, scale);
6257 
6258 			preferred_refresh = drm_mode_vrefresh(preferred_mode);
6259 		}
6260 	}
6261 
6262 	if (recalculate_timing)
6263 		drm_mode_set_crtcinfo(&saved_mode, 0);
6264 	else if (!dm_state)
6265 		drm_mode_set_crtcinfo(&mode, 0);
6266 
6267        /*
6268 	* If scaling is enabled and refresh rate didn't change
6269 	* we copy the vic and polarities of the old timings
6270 	*/
6271 	if (!scale || mode_refresh != preferred_refresh)
6272 		fill_stream_properties_from_drm_display_mode(
6273 			stream, &mode, &aconnector->base, con_state, NULL,
6274 			requested_bpc);
6275 	else
6276 		fill_stream_properties_from_drm_display_mode(
6277 			stream, &mode, &aconnector->base, con_state, old_stream,
6278 			requested_bpc);
6279 
6280 #if defined(CONFIG_DRM_AMD_DC_DCN)
6281 	/* SST DSC determination policy */
6282 	update_dsc_caps(aconnector, sink, stream, &dsc_caps);
6283 	if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
6284 		apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
6285 #endif
6286 
6287 	update_stream_scaling_settings(&mode, dm_state, stream);
6288 
6289 	fill_audio_info(
6290 		&stream->audio_info,
6291 		drm_connector,
6292 		sink);
6293 
6294 	update_stream_signal(stream, sink);
6295 
6296 	if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
6297 		mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
6298 
6299 	if (stream->link->psr_settings.psr_feature_enabled) {
6300 		//
6301 		// should decide stream support vsc sdp colorimetry capability
6302 		// before building vsc info packet
6303 		//
6304 		stream->use_vsc_sdp_for_colorimetry = false;
6305 		if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
6306 			stream->use_vsc_sdp_for_colorimetry =
6307 				aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
6308 		} else {
6309 			if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
6310 				stream->use_vsc_sdp_for_colorimetry = true;
6311 		}
6312 		mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
6313 		aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
6314 
6315 	}
6316 finish:
6317 	dc_sink_release(sink);
6318 
6319 	return stream;
6320 }
6321 
6322 static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
6323 {
6324 	drm_crtc_cleanup(crtc);
6325 	kfree(crtc);
6326 }
6327 
6328 static void dm_crtc_destroy_state(struct drm_crtc *crtc,
6329 				  struct drm_crtc_state *state)
6330 {
6331 	struct dm_crtc_state *cur = to_dm_crtc_state(state);
6332 
6333 	/* TODO Destroy dc_stream objects are stream object is flattened */
6334 	if (cur->stream)
6335 		dc_stream_release(cur->stream);
6336 
6337 
6338 	__drm_atomic_helper_crtc_destroy_state(state);
6339 
6340 
6341 	kfree(state);
6342 }
6343 
6344 static void dm_crtc_reset_state(struct drm_crtc *crtc)
6345 {
6346 	struct dm_crtc_state *state;
6347 
6348 	if (crtc->state)
6349 		dm_crtc_destroy_state(crtc, crtc->state);
6350 
6351 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6352 	if (WARN_ON(!state))
6353 		return;
6354 
6355 	__drm_atomic_helper_crtc_reset(crtc, &state->base);
6356 }
6357 
6358 static struct drm_crtc_state *
6359 dm_crtc_duplicate_state(struct drm_crtc *crtc)
6360 {
6361 	struct dm_crtc_state *state, *cur;
6362 
6363 	cur = to_dm_crtc_state(crtc->state);
6364 
6365 	if (WARN_ON(!crtc->state))
6366 		return NULL;
6367 
6368 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6369 	if (!state)
6370 		return NULL;
6371 
6372 	__drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
6373 
6374 	if (cur->stream) {
6375 		state->stream = cur->stream;
6376 		dc_stream_retain(state->stream);
6377 	}
6378 
6379 	state->active_planes = cur->active_planes;
6380 	state->vrr_infopacket = cur->vrr_infopacket;
6381 	state->abm_level = cur->abm_level;
6382 	state->vrr_supported = cur->vrr_supported;
6383 	state->freesync_config = cur->freesync_config;
6384 	state->cm_has_degamma = cur->cm_has_degamma;
6385 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
6386 	/* TODO Duplicate dc_stream after objects are stream object is flattened */
6387 
6388 	return &state->base;
6389 }
6390 
6391 #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
6392 static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
6393 {
6394 	crtc_debugfs_init(crtc);
6395 
6396 	return 0;
6397 }
6398 #endif
6399 
6400 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
6401 {
6402 	enum dc_irq_source irq_source;
6403 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6404 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6405 	int rc;
6406 
6407 	irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst;
6408 
6409 	rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
6410 
6411 	DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n",
6412 		      acrtc->crtc_id, enable ? "en" : "dis", rc);
6413 	return rc;
6414 }
6415 
6416 static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
6417 {
6418 	enum dc_irq_source irq_source;
6419 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
6420 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
6421 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
6422 #if defined(CONFIG_DRM_AMD_DC_DCN)
6423 	struct amdgpu_display_manager *dm = &adev->dm;
6424 	struct vblank_control_work *work;
6425 #endif
6426 	int rc = 0;
6427 
6428 	if (enable) {
6429 		/* vblank irq on -> Only need vupdate irq in vrr mode */
6430 		if (amdgpu_dm_vrr_active(acrtc_state))
6431 			rc = dm_set_vupdate_irq(crtc, true);
6432 	} else {
6433 		/* vblank irq off -> vupdate irq off */
6434 		rc = dm_set_vupdate_irq(crtc, false);
6435 	}
6436 
6437 	if (rc)
6438 		return rc;
6439 
6440 	irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst;
6441 
6442 	if (!dc_interrupt_set(adev->dm.dc, irq_source, enable))
6443 		return -EBUSY;
6444 
6445 	if (amdgpu_in_reset(adev))
6446 		return 0;
6447 
6448 #if defined(CONFIG_DRM_AMD_DC_DCN)
6449 	if (dm->vblank_control_workqueue) {
6450 		work = kzalloc(sizeof(*work), GFP_ATOMIC);
6451 		if (!work)
6452 			return -ENOMEM;
6453 
6454 		INIT_WORK(&work->work, vblank_control_worker);
6455 		work->dm = dm;
6456 		work->acrtc = acrtc;
6457 		work->enable = enable;
6458 
6459 		if (acrtc_state->stream) {
6460 			dc_stream_retain(acrtc_state->stream);
6461 			work->stream = acrtc_state->stream;
6462 		}
6463 
6464 		queue_work(dm->vblank_control_workqueue, &work->work);
6465 	}
6466 #endif
6467 
6468 	return 0;
6469 }
6470 
6471 static int dm_enable_vblank(struct drm_crtc *crtc)
6472 {
6473 	return dm_set_vblank(crtc, true);
6474 }
6475 
6476 static void dm_disable_vblank(struct drm_crtc *crtc)
6477 {
6478 	dm_set_vblank(crtc, false);
6479 }
6480 
6481 /* Implemented only the options currently availible for the driver */
6482 static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
6483 	.reset = dm_crtc_reset_state,
6484 	.destroy = amdgpu_dm_crtc_destroy,
6485 	.set_config = drm_atomic_helper_set_config,
6486 	.page_flip = drm_atomic_helper_page_flip,
6487 	.atomic_duplicate_state = dm_crtc_duplicate_state,
6488 	.atomic_destroy_state = dm_crtc_destroy_state,
6489 	.set_crc_source = amdgpu_dm_crtc_set_crc_source,
6490 	.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
6491 	.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
6492 	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
6493 	.enable_vblank = dm_enable_vblank,
6494 	.disable_vblank = dm_disable_vblank,
6495 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
6496 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
6497 	.late_register = amdgpu_dm_crtc_late_register,
6498 #endif
6499 };
6500 
6501 static enum drm_connector_status
6502 amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
6503 {
6504 	bool connected;
6505 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6506 
6507 	/*
6508 	 * Notes:
6509 	 * 1. This interface is NOT called in context of HPD irq.
6510 	 * 2. This interface *is called* in context of user-mode ioctl. Which
6511 	 * makes it a bad place for *any* MST-related activity.
6512 	 */
6513 
6514 	if (aconnector->base.force == DRM_FORCE_UNSPECIFIED &&
6515 	    !aconnector->fake_enable)
6516 		connected = (aconnector->dc_sink != NULL);
6517 	else
6518 		connected = (aconnector->base.force == DRM_FORCE_ON);
6519 
6520 	update_subconnector_property(aconnector);
6521 
6522 	return (connected ? connector_status_connected :
6523 			connector_status_disconnected);
6524 }
6525 
6526 int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
6527 					    struct drm_connector_state *connector_state,
6528 					    struct drm_property *property,
6529 					    uint64_t val)
6530 {
6531 	struct drm_device *dev = connector->dev;
6532 	struct amdgpu_device *adev = drm_to_adev(dev);
6533 	struct dm_connector_state *dm_old_state =
6534 		to_dm_connector_state(connector->state);
6535 	struct dm_connector_state *dm_new_state =
6536 		to_dm_connector_state(connector_state);
6537 
6538 	int ret = -EINVAL;
6539 
6540 	if (property == dev->mode_config.scaling_mode_property) {
6541 		enum amdgpu_rmx_type rmx_type;
6542 
6543 		switch (val) {
6544 		case DRM_MODE_SCALE_CENTER:
6545 			rmx_type = RMX_CENTER;
6546 			break;
6547 		case DRM_MODE_SCALE_ASPECT:
6548 			rmx_type = RMX_ASPECT;
6549 			break;
6550 		case DRM_MODE_SCALE_FULLSCREEN:
6551 			rmx_type = RMX_FULL;
6552 			break;
6553 		case DRM_MODE_SCALE_NONE:
6554 		default:
6555 			rmx_type = RMX_OFF;
6556 			break;
6557 		}
6558 
6559 		if (dm_old_state->scaling == rmx_type)
6560 			return 0;
6561 
6562 		dm_new_state->scaling = rmx_type;
6563 		ret = 0;
6564 	} else if (property == adev->mode_info.underscan_hborder_property) {
6565 		dm_new_state->underscan_hborder = val;
6566 		ret = 0;
6567 	} else if (property == adev->mode_info.underscan_vborder_property) {
6568 		dm_new_state->underscan_vborder = val;
6569 		ret = 0;
6570 	} else if (property == adev->mode_info.underscan_property) {
6571 		dm_new_state->underscan_enable = val;
6572 		ret = 0;
6573 	} else if (property == adev->mode_info.abm_level_property) {
6574 		dm_new_state->abm_level = val;
6575 		ret = 0;
6576 	}
6577 
6578 	return ret;
6579 }
6580 
6581 int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
6582 					    const struct drm_connector_state *state,
6583 					    struct drm_property *property,
6584 					    uint64_t *val)
6585 {
6586 	struct drm_device *dev = connector->dev;
6587 	struct amdgpu_device *adev = drm_to_adev(dev);
6588 	struct dm_connector_state *dm_state =
6589 		to_dm_connector_state(state);
6590 	int ret = -EINVAL;
6591 
6592 	if (property == dev->mode_config.scaling_mode_property) {
6593 		switch (dm_state->scaling) {
6594 		case RMX_CENTER:
6595 			*val = DRM_MODE_SCALE_CENTER;
6596 			break;
6597 		case RMX_ASPECT:
6598 			*val = DRM_MODE_SCALE_ASPECT;
6599 			break;
6600 		case RMX_FULL:
6601 			*val = DRM_MODE_SCALE_FULLSCREEN;
6602 			break;
6603 		case RMX_OFF:
6604 		default:
6605 			*val = DRM_MODE_SCALE_NONE;
6606 			break;
6607 		}
6608 		ret = 0;
6609 	} else if (property == adev->mode_info.underscan_hborder_property) {
6610 		*val = dm_state->underscan_hborder;
6611 		ret = 0;
6612 	} else if (property == adev->mode_info.underscan_vborder_property) {
6613 		*val = dm_state->underscan_vborder;
6614 		ret = 0;
6615 	} else if (property == adev->mode_info.underscan_property) {
6616 		*val = dm_state->underscan_enable;
6617 		ret = 0;
6618 	} else if (property == adev->mode_info.abm_level_property) {
6619 		*val = dm_state->abm_level;
6620 		ret = 0;
6621 	}
6622 
6623 	return ret;
6624 }
6625 
6626 static void amdgpu_dm_connector_unregister(struct drm_connector *connector)
6627 {
6628 	struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector);
6629 
6630 	drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux);
6631 }
6632 
6633 static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
6634 {
6635 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6636 	const struct dc_link *link = aconnector->dc_link;
6637 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6638 	struct amdgpu_display_manager *dm = &adev->dm;
6639 	int i;
6640 
6641 	/*
6642 	 * Call only if mst_mgr was iniitalized before since it's not done
6643 	 * for all connector types.
6644 	 */
6645 	if (aconnector->mst_mgr.dev)
6646 		drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr);
6647 
6648 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
6649 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
6650 	for (i = 0; i < dm->num_of_edps; i++) {
6651 		if ((link == dm->backlight_link[i]) && dm->backlight_dev[i]) {
6652 			backlight_device_unregister(dm->backlight_dev[i]);
6653 			dm->backlight_dev[i] = NULL;
6654 		}
6655 	}
6656 #endif
6657 
6658 	if (aconnector->dc_em_sink)
6659 		dc_sink_release(aconnector->dc_em_sink);
6660 	aconnector->dc_em_sink = NULL;
6661 	if (aconnector->dc_sink)
6662 		dc_sink_release(aconnector->dc_sink);
6663 	aconnector->dc_sink = NULL;
6664 
6665 	drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
6666 	drm_connector_unregister(connector);
6667 	drm_connector_cleanup(connector);
6668 	if (aconnector->i2c) {
6669 		i2c_del_adapter(&aconnector->i2c->base);
6670 		kfree(aconnector->i2c);
6671 	}
6672 	kfree(aconnector->dm_dp_aux.aux.name);
6673 
6674 	kfree(connector);
6675 }
6676 
6677 void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector)
6678 {
6679 	struct dm_connector_state *state =
6680 		to_dm_connector_state(connector->state);
6681 
6682 	if (connector->state)
6683 		__drm_atomic_helper_connector_destroy_state(connector->state);
6684 
6685 	kfree(state);
6686 
6687 	state = kzalloc(sizeof(*state), GFP_KERNEL);
6688 
6689 	if (state) {
6690 		state->scaling = RMX_OFF;
6691 		state->underscan_enable = false;
6692 		state->underscan_hborder = 0;
6693 		state->underscan_vborder = 0;
6694 		state->base.max_requested_bpc = 8;
6695 		state->vcpi_slots = 0;
6696 		state->pbn = 0;
6697 		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
6698 			state->abm_level = amdgpu_dm_abm_level;
6699 
6700 		__drm_atomic_helper_connector_reset(connector, &state->base);
6701 	}
6702 }
6703 
6704 struct drm_connector_state *
6705 amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
6706 {
6707 	struct dm_connector_state *state =
6708 		to_dm_connector_state(connector->state);
6709 
6710 	struct dm_connector_state *new_state =
6711 			kmemdup(state, sizeof(*state), GFP_KERNEL);
6712 
6713 	if (!new_state)
6714 		return NULL;
6715 
6716 	__drm_atomic_helper_connector_duplicate_state(connector, &new_state->base);
6717 
6718 	new_state->freesync_capable = state->freesync_capable;
6719 	new_state->abm_level = state->abm_level;
6720 	new_state->scaling = state->scaling;
6721 	new_state->underscan_enable = state->underscan_enable;
6722 	new_state->underscan_hborder = state->underscan_hborder;
6723 	new_state->underscan_vborder = state->underscan_vborder;
6724 	new_state->vcpi_slots = state->vcpi_slots;
6725 	new_state->pbn = state->pbn;
6726 	return &new_state->base;
6727 }
6728 
6729 static int
6730 amdgpu_dm_connector_late_register(struct drm_connector *connector)
6731 {
6732 	struct amdgpu_dm_connector *amdgpu_dm_connector =
6733 		to_amdgpu_dm_connector(connector);
6734 	int r;
6735 
6736 	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
6737 	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
6738 		amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
6739 		r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
6740 		if (r)
6741 			return r;
6742 	}
6743 
6744 #if defined(CONFIG_DEBUG_FS)
6745 	connector_debugfs_init(amdgpu_dm_connector);
6746 #endif
6747 
6748 	return 0;
6749 }
6750 
6751 static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
6752 	.reset = amdgpu_dm_connector_funcs_reset,
6753 	.detect = amdgpu_dm_connector_detect,
6754 	.fill_modes = drm_helper_probe_single_connector_modes,
6755 	.destroy = amdgpu_dm_connector_destroy,
6756 	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
6757 	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
6758 	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
6759 	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
6760 	.late_register = amdgpu_dm_connector_late_register,
6761 	.early_unregister = amdgpu_dm_connector_unregister
6762 };
6763 
6764 static int get_modes(struct drm_connector *connector)
6765 {
6766 	return amdgpu_dm_connector_get_modes(connector);
6767 }
6768 
6769 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
6770 {
6771 	struct dc_sink_init_data init_params = {
6772 			.link = aconnector->dc_link,
6773 			.sink_signal = SIGNAL_TYPE_VIRTUAL
6774 	};
6775 	struct edid *edid;
6776 
6777 	if (!aconnector->base.edid_blob_ptr) {
6778 		DRM_ERROR("No EDID firmware found on connector: %s ,forcing to OFF!\n",
6779 				aconnector->base.name);
6780 
6781 		aconnector->base.force = DRM_FORCE_OFF;
6782 		aconnector->base.override_edid = false;
6783 		return;
6784 	}
6785 
6786 	edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
6787 
6788 	aconnector->edid = edid;
6789 
6790 	aconnector->dc_em_sink = dc_link_add_remote_sink(
6791 		aconnector->dc_link,
6792 		(uint8_t *)edid,
6793 		(edid->extensions + 1) * EDID_LENGTH,
6794 		&init_params);
6795 
6796 	if (aconnector->base.force == DRM_FORCE_ON) {
6797 		aconnector->dc_sink = aconnector->dc_link->local_sink ?
6798 		aconnector->dc_link->local_sink :
6799 		aconnector->dc_em_sink;
6800 		dc_sink_retain(aconnector->dc_sink);
6801 	}
6802 }
6803 
6804 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
6805 {
6806 	struct dc_link *link = (struct dc_link *)aconnector->dc_link;
6807 
6808 	/*
6809 	 * In case of headless boot with force on for DP managed connector
6810 	 * Those settings have to be != 0 to get initial modeset
6811 	 */
6812 	if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) {
6813 		link->verified_link_cap.lane_count = LANE_COUNT_FOUR;
6814 		link->verified_link_cap.link_rate = LINK_RATE_HIGH2;
6815 	}
6816 
6817 
6818 	aconnector->base.override_edid = true;
6819 	create_eml_sink(aconnector);
6820 }
6821 
6822 static struct dc_stream_state *
6823 create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector,
6824 				const struct drm_display_mode *drm_mode,
6825 				const struct dm_connector_state *dm_state,
6826 				const struct dc_stream_state *old_stream)
6827 {
6828 	struct drm_connector *connector = &aconnector->base;
6829 	struct amdgpu_device *adev = drm_to_adev(connector->dev);
6830 	struct dc_stream_state *stream;
6831 	const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL;
6832 	int requested_bpc = drm_state ? drm_state->max_requested_bpc : 8;
6833 	enum dc_status dc_result = DC_OK;
6834 
6835 	do {
6836 		stream = create_stream_for_sink(aconnector, drm_mode,
6837 						dm_state, old_stream,
6838 						requested_bpc);
6839 		if (stream == NULL) {
6840 			DRM_ERROR("Failed to create stream for sink!\n");
6841 			break;
6842 		}
6843 
6844 		dc_result = dc_validate_stream(adev->dm.dc, stream);
6845 
6846 		if (dc_result != DC_OK) {
6847 			DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
6848 				      drm_mode->hdisplay,
6849 				      drm_mode->vdisplay,
6850 				      drm_mode->clock,
6851 				      dc_result,
6852 				      dc_status_to_str(dc_result));
6853 
6854 			dc_stream_release(stream);
6855 			stream = NULL;
6856 			requested_bpc -= 2; /* lower bpc to retry validation */
6857 		}
6858 
6859 	} while (stream == NULL && requested_bpc >= 6);
6860 
6861 	if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
6862 		DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
6863 
6864 		aconnector->force_yuv420_output = true;
6865 		stream = create_validate_stream_for_sink(aconnector, drm_mode,
6866 						dm_state, old_stream);
6867 		aconnector->force_yuv420_output = false;
6868 	}
6869 
6870 	return stream;
6871 }
6872 
6873 enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
6874 				   struct drm_display_mode *mode)
6875 {
6876 	int result = MODE_ERROR;
6877 	struct dc_sink *dc_sink;
6878 	/* TODO: Unhardcode stream count */
6879 	struct dc_stream_state *stream;
6880 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
6881 
6882 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
6883 			(mode->flags & DRM_MODE_FLAG_DBLSCAN))
6884 		return result;
6885 
6886 	/*
6887 	 * Only run this the first time mode_valid is called to initilialize
6888 	 * EDID mgmt
6889 	 */
6890 	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
6891 		!aconnector->dc_em_sink)
6892 		handle_edid_mgmt(aconnector);
6893 
6894 	dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
6895 
6896 	if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
6897 				aconnector->base.force != DRM_FORCE_ON) {
6898 		DRM_ERROR("dc_sink is NULL!\n");
6899 		goto fail;
6900 	}
6901 
6902 	stream = create_validate_stream_for_sink(aconnector, mode, NULL, NULL);
6903 	if (stream) {
6904 		dc_stream_release(stream);
6905 		result = MODE_OK;
6906 	}
6907 
6908 fail:
6909 	/* TODO: error handling*/
6910 	return result;
6911 }
6912 
6913 static int fill_hdr_info_packet(const struct drm_connector_state *state,
6914 				struct dc_info_packet *out)
6915 {
6916 	struct hdmi_drm_infoframe frame;
6917 	unsigned char buf[30]; /* 26 + 4 */
6918 	ssize_t len;
6919 	int ret, i;
6920 
6921 	memset(out, 0, sizeof(*out));
6922 
6923 	if (!state->hdr_output_metadata)
6924 		return 0;
6925 
6926 	ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
6927 	if (ret)
6928 		return ret;
6929 
6930 	len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
6931 	if (len < 0)
6932 		return (int)len;
6933 
6934 	/* Static metadata is a fixed 26 bytes + 4 byte header. */
6935 	if (len != 30)
6936 		return -EINVAL;
6937 
6938 	/* Prepare the infopacket for DC. */
6939 	switch (state->connector->connector_type) {
6940 	case DRM_MODE_CONNECTOR_HDMIA:
6941 		out->hb0 = 0x87; /* type */
6942 		out->hb1 = 0x01; /* version */
6943 		out->hb2 = 0x1A; /* length */
6944 		out->sb[0] = buf[3]; /* checksum */
6945 		i = 1;
6946 		break;
6947 
6948 	case DRM_MODE_CONNECTOR_DisplayPort:
6949 	case DRM_MODE_CONNECTOR_eDP:
6950 		out->hb0 = 0x00; /* sdp id, zero */
6951 		out->hb1 = 0x87; /* type */
6952 		out->hb2 = 0x1D; /* payload len - 1 */
6953 		out->hb3 = (0x13 << 2); /* sdp version */
6954 		out->sb[0] = 0x01; /* version */
6955 		out->sb[1] = 0x1A; /* length */
6956 		i = 2;
6957 		break;
6958 
6959 	default:
6960 		return -EINVAL;
6961 	}
6962 
6963 	memcpy(&out->sb[i], &buf[4], 26);
6964 	out->valid = true;
6965 
6966 	print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb,
6967 		       sizeof(out->sb), false);
6968 
6969 	return 0;
6970 }
6971 
6972 static int
6973 amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
6974 				 struct drm_atomic_state *state)
6975 {
6976 	struct drm_connector_state *new_con_state =
6977 		drm_atomic_get_new_connector_state(state, conn);
6978 	struct drm_connector_state *old_con_state =
6979 		drm_atomic_get_old_connector_state(state, conn);
6980 	struct drm_crtc *crtc = new_con_state->crtc;
6981 	struct drm_crtc_state *new_crtc_state;
6982 	int ret;
6983 
6984 	trace_amdgpu_dm_connector_atomic_check(new_con_state);
6985 
6986 	if (!crtc)
6987 		return 0;
6988 
6989 	if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
6990 		struct dc_info_packet hdr_infopacket;
6991 
6992 		ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
6993 		if (ret)
6994 			return ret;
6995 
6996 		new_crtc_state = drm_atomic_get_crtc_state(state, crtc);
6997 		if (IS_ERR(new_crtc_state))
6998 			return PTR_ERR(new_crtc_state);
6999 
7000 		/*
7001 		 * DC considers the stream backends changed if the
7002 		 * static metadata changes. Forcing the modeset also
7003 		 * gives a simple way for userspace to switch from
7004 		 * 8bpc to 10bpc when setting the metadata to enter
7005 		 * or exit HDR.
7006 		 *
7007 		 * Changing the static metadata after it's been
7008 		 * set is permissible, however. So only force a
7009 		 * modeset if we're entering or exiting HDR.
7010 		 */
7011 		new_crtc_state->mode_changed =
7012 			!old_con_state->hdr_output_metadata ||
7013 			!new_con_state->hdr_output_metadata;
7014 	}
7015 
7016 	return 0;
7017 }
7018 
7019 static const struct drm_connector_helper_funcs
7020 amdgpu_dm_connector_helper_funcs = {
7021 	/*
7022 	 * If hotplugging a second bigger display in FB Con mode, bigger resolution
7023 	 * modes will be filtered by drm_mode_validate_size(), and those modes
7024 	 * are missing after user start lightdm. So we need to renew modes list.
7025 	 * in get_modes call back, not just return the modes count
7026 	 */
7027 	.get_modes = get_modes,
7028 	.mode_valid = amdgpu_dm_connector_mode_valid,
7029 	.atomic_check = amdgpu_dm_connector_atomic_check,
7030 };
7031 
7032 static void dm_crtc_helper_disable(struct drm_crtc *crtc)
7033 {
7034 }
7035 
7036 static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
7037 {
7038 	struct drm_atomic_state *state = new_crtc_state->state;
7039 	struct drm_plane *plane;
7040 	int num_active = 0;
7041 
7042 	drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
7043 		struct drm_plane_state *new_plane_state;
7044 
7045 		/* Cursor planes are "fake". */
7046 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
7047 			continue;
7048 
7049 		new_plane_state = drm_atomic_get_new_plane_state(state, plane);
7050 
7051 		if (!new_plane_state) {
7052 			/*
7053 			 * The plane is enable on the CRTC and hasn't changed
7054 			 * state. This means that it previously passed
7055 			 * validation and is therefore enabled.
7056 			 */
7057 			num_active += 1;
7058 			continue;
7059 		}
7060 
7061 		/* We need a framebuffer to be considered enabled. */
7062 		num_active += (new_plane_state->fb != NULL);
7063 	}
7064 
7065 	return num_active;
7066 }
7067 
7068 static void dm_update_crtc_active_planes(struct drm_crtc *crtc,
7069 					 struct drm_crtc_state *new_crtc_state)
7070 {
7071 	struct dm_crtc_state *dm_new_crtc_state =
7072 		to_dm_crtc_state(new_crtc_state);
7073 
7074 	dm_new_crtc_state->active_planes = 0;
7075 
7076 	if (!dm_new_crtc_state->stream)
7077 		return;
7078 
7079 	dm_new_crtc_state->active_planes =
7080 		count_crtc_active_planes(new_crtc_state);
7081 }
7082 
7083 static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc,
7084 				       struct drm_atomic_state *state)
7085 {
7086 	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
7087 									  crtc);
7088 	struct amdgpu_device *adev = drm_to_adev(crtc->dev);
7089 	struct dc *dc = adev->dm.dc;
7090 	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
7091 	int ret = -EINVAL;
7092 
7093 	trace_amdgpu_dm_crtc_atomic_check(crtc_state);
7094 
7095 	dm_update_crtc_active_planes(crtc, crtc_state);
7096 
7097 	if (WARN_ON(unlikely(!dm_crtc_state->stream &&
7098 		     modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) {
7099 		return ret;
7100 	}
7101 
7102 	/*
7103 	 * We require the primary plane to be enabled whenever the CRTC is, otherwise
7104 	 * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other
7105 	 * planes are disabled, which is not supported by the hardware. And there is legacy
7106 	 * userspace which stops using the HW cursor altogether in response to the resulting EINVAL.
7107 	 */
7108 	if (crtc_state->enable &&
7109 	    !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) {
7110 		DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n");
7111 		return -EINVAL;
7112 	}
7113 
7114 	/* In some use cases, like reset, no stream is attached */
7115 	if (!dm_crtc_state->stream)
7116 		return 0;
7117 
7118 	if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK)
7119 		return 0;
7120 
7121 	DRM_DEBUG_ATOMIC("Failed DC stream validation\n");
7122 	return ret;
7123 }
7124 
7125 static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
7126 				      const struct drm_display_mode *mode,
7127 				      struct drm_display_mode *adjusted_mode)
7128 {
7129 	return true;
7130 }
7131 
7132 static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
7133 	.disable = dm_crtc_helper_disable,
7134 	.atomic_check = dm_crtc_helper_atomic_check,
7135 	.mode_fixup = dm_crtc_helper_mode_fixup,
7136 	.get_scanout_position = amdgpu_crtc_get_scanout_position,
7137 };
7138 
7139 static void dm_encoder_helper_disable(struct drm_encoder *encoder)
7140 {
7141 
7142 }
7143 
7144 static int convert_dc_color_depth_into_bpc (enum dc_color_depth display_color_depth)
7145 {
7146 	switch (display_color_depth) {
7147 		case COLOR_DEPTH_666:
7148 			return 6;
7149 		case COLOR_DEPTH_888:
7150 			return 8;
7151 		case COLOR_DEPTH_101010:
7152 			return 10;
7153 		case COLOR_DEPTH_121212:
7154 			return 12;
7155 		case COLOR_DEPTH_141414:
7156 			return 14;
7157 		case COLOR_DEPTH_161616:
7158 			return 16;
7159 		default:
7160 			break;
7161 		}
7162 	return 0;
7163 }
7164 
7165 static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder,
7166 					  struct drm_crtc_state *crtc_state,
7167 					  struct drm_connector_state *conn_state)
7168 {
7169 	struct drm_atomic_state *state = crtc_state->state;
7170 	struct drm_connector *connector = conn_state->connector;
7171 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
7172 	struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state);
7173 	const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
7174 	struct drm_dp_mst_topology_mgr *mst_mgr;
7175 	struct drm_dp_mst_port *mst_port;
7176 	enum dc_color_depth color_depth;
7177 	int clock, bpp = 0;
7178 	bool is_y420 = false;
7179 
7180 	if (!aconnector->port || !aconnector->dc_sink)
7181 		return 0;
7182 
7183 	mst_port = aconnector->port;
7184 	mst_mgr = &aconnector->mst_port->mst_mgr;
7185 
7186 	if (!crtc_state->connectors_changed && !crtc_state->mode_changed)
7187 		return 0;
7188 
7189 	if (!state->duplicated) {
7190 		int max_bpc = conn_state->max_requested_bpc;
7191 		is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) &&
7192 				aconnector->force_yuv420_output;
7193 		color_depth = convert_color_depth_from_display_info(connector,
7194 								    is_y420,
7195 								    max_bpc);
7196 		bpp = convert_dc_color_depth_into_bpc(color_depth) * 3;
7197 		clock = adjusted_mode->clock;
7198 		dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false);
7199 	}
7200 	dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state,
7201 									   mst_mgr,
7202 									   mst_port,
7203 									   dm_new_connector_state->pbn,
7204 									   dm_mst_get_pbn_divider(aconnector->dc_link));
7205 	if (dm_new_connector_state->vcpi_slots < 0) {
7206 		DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots);
7207 		return dm_new_connector_state->vcpi_slots;
7208 	}
7209 	return 0;
7210 }
7211 
7212 const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = {
7213 	.disable = dm_encoder_helper_disable,
7214 	.atomic_check = dm_encoder_helper_atomic_check
7215 };
7216 
7217 #if defined(CONFIG_DRM_AMD_DC_DCN)
7218 static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state,
7219 					    struct dc_state *dc_state,
7220 					    struct dsc_mst_fairness_vars *vars)
7221 {
7222 	struct dc_stream_state *stream = NULL;
7223 	struct drm_connector *connector;
7224 	struct drm_connector_state *new_con_state;
7225 	struct amdgpu_dm_connector *aconnector;
7226 	struct dm_connector_state *dm_conn_state;
7227 	int i, j, clock;
7228 	int vcpi, pbn_div, pbn = 0;
7229 
7230 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
7231 
7232 		aconnector = to_amdgpu_dm_connector(connector);
7233 
7234 		if (!aconnector->port)
7235 			continue;
7236 
7237 		if (!new_con_state || !new_con_state->crtc)
7238 			continue;
7239 
7240 		dm_conn_state = to_dm_connector_state(new_con_state);
7241 
7242 		for (j = 0; j < dc_state->stream_count; j++) {
7243 			stream = dc_state->streams[j];
7244 			if (!stream)
7245 				continue;
7246 
7247 			if ((struct amdgpu_dm_connector*)stream->dm_stream_context == aconnector)
7248 				break;
7249 
7250 			stream = NULL;
7251 		}
7252 
7253 		if (!stream)
7254 			continue;
7255 
7256 		if (stream->timing.flags.DSC != 1) {
7257 			drm_dp_mst_atomic_enable_dsc(state,
7258 						     aconnector->port,
7259 						     dm_conn_state->pbn,
7260 						     0,
7261 						     false);
7262 			continue;
7263 		}
7264 
7265 		pbn_div = dm_mst_get_pbn_divider(stream->link);
7266 		clock = stream->timing.pix_clk_100hz / 10;
7267 		/* pbn is calculated by compute_mst_dsc_configs_for_state*/
7268 		for (j = 0; j < dc_state->stream_count; j++) {
7269 			if (vars[j].aconnector == aconnector) {
7270 				pbn = vars[j].pbn;
7271 				break;
7272 			}
7273 		}
7274 
7275 		vcpi = drm_dp_mst_atomic_enable_dsc(state,
7276 						    aconnector->port,
7277 						    pbn, pbn_div,
7278 						    true);
7279 		if (vcpi < 0)
7280 			return vcpi;
7281 
7282 		dm_conn_state->pbn = pbn;
7283 		dm_conn_state->vcpi_slots = vcpi;
7284 	}
7285 	return 0;
7286 }
7287 #endif
7288 
7289 static void dm_drm_plane_reset(struct drm_plane *plane)
7290 {
7291 	struct dm_plane_state *amdgpu_state = NULL;
7292 
7293 	if (plane->state)
7294 		plane->funcs->atomic_destroy_state(plane, plane->state);
7295 
7296 	amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL);
7297 	WARN_ON(amdgpu_state == NULL);
7298 
7299 	if (amdgpu_state)
7300 		__drm_atomic_helper_plane_reset(plane, &amdgpu_state->base);
7301 }
7302 
7303 static struct drm_plane_state *
7304 dm_drm_plane_duplicate_state(struct drm_plane *plane)
7305 {
7306 	struct dm_plane_state *dm_plane_state, *old_dm_plane_state;
7307 
7308 	old_dm_plane_state = to_dm_plane_state(plane->state);
7309 	dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL);
7310 	if (!dm_plane_state)
7311 		return NULL;
7312 
7313 	__drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base);
7314 
7315 	if (old_dm_plane_state->dc_state) {
7316 		dm_plane_state->dc_state = old_dm_plane_state->dc_state;
7317 		dc_plane_state_retain(dm_plane_state->dc_state);
7318 	}
7319 
7320 	return &dm_plane_state->base;
7321 }
7322 
7323 static void dm_drm_plane_destroy_state(struct drm_plane *plane,
7324 				struct drm_plane_state *state)
7325 {
7326 	struct dm_plane_state *dm_plane_state = to_dm_plane_state(state);
7327 
7328 	if (dm_plane_state->dc_state)
7329 		dc_plane_state_release(dm_plane_state->dc_state);
7330 
7331 	drm_atomic_helper_plane_destroy_state(plane, state);
7332 }
7333 
7334 static const struct drm_plane_funcs dm_plane_funcs = {
7335 	.update_plane	= drm_atomic_helper_update_plane,
7336 	.disable_plane	= drm_atomic_helper_disable_plane,
7337 	.destroy	= drm_primary_helper_destroy,
7338 	.reset = dm_drm_plane_reset,
7339 	.atomic_duplicate_state = dm_drm_plane_duplicate_state,
7340 	.atomic_destroy_state = dm_drm_plane_destroy_state,
7341 	.format_mod_supported = dm_plane_format_mod_supported,
7342 };
7343 
7344 static int dm_plane_helper_prepare_fb(struct drm_plane *plane,
7345 				      struct drm_plane_state *new_state)
7346 {
7347 	struct amdgpu_framebuffer *afb;
7348 	struct drm_gem_object *obj;
7349 	struct amdgpu_device *adev;
7350 	struct amdgpu_bo *rbo;
7351 	struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old;
7352 	struct list_head list;
7353 	struct ttm_validate_buffer tv;
7354 	struct ww_acquire_ctx ticket;
7355 	uint32_t domain;
7356 	int r;
7357 
7358 	if (!new_state->fb) {
7359 		DRM_DEBUG_KMS("No FB bound\n");
7360 		return 0;
7361 	}
7362 
7363 	afb = to_amdgpu_framebuffer(new_state->fb);
7364 	obj = new_state->fb->obj[0];
7365 	rbo = gem_to_amdgpu_bo(obj);
7366 	adev = amdgpu_ttm_adev(rbo->tbo.bdev);
7367 	INIT_LIST_HEAD(&list);
7368 
7369 	tv.bo = &rbo->tbo;
7370 	tv.num_shared = 1;
7371 	list_add(&tv.head, &list);
7372 
7373 	r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL);
7374 	if (r) {
7375 		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
7376 		return r;
7377 	}
7378 
7379 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7380 		domain = amdgpu_display_supported_domains(adev, rbo->flags);
7381 	else
7382 		domain = AMDGPU_GEM_DOMAIN_VRAM;
7383 
7384 	r = amdgpu_bo_pin(rbo, domain);
7385 	if (unlikely(r != 0)) {
7386 		if (r != -ERESTARTSYS)
7387 			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
7388 		ttm_eu_backoff_reservation(&ticket, &list);
7389 		return r;
7390 	}
7391 
7392 	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
7393 	if (unlikely(r != 0)) {
7394 		amdgpu_bo_unpin(rbo);
7395 		ttm_eu_backoff_reservation(&ticket, &list);
7396 		DRM_ERROR("%p bind failed\n", rbo);
7397 		return r;
7398 	}
7399 
7400 	ttm_eu_backoff_reservation(&ticket, &list);
7401 
7402 	afb->address = amdgpu_bo_gpu_offset(rbo);
7403 
7404 	amdgpu_bo_ref(rbo);
7405 
7406 	/**
7407 	 * We don't do surface updates on planes that have been newly created,
7408 	 * but we also don't have the afb->address during atomic check.
7409 	 *
7410 	 * Fill in buffer attributes depending on the address here, but only on
7411 	 * newly created planes since they're not being used by DC yet and this
7412 	 * won't modify global state.
7413 	 */
7414 	dm_plane_state_old = to_dm_plane_state(plane->state);
7415 	dm_plane_state_new = to_dm_plane_state(new_state);
7416 
7417 	if (dm_plane_state_new->dc_state &&
7418 	    dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) {
7419 		struct dc_plane_state *plane_state =
7420 			dm_plane_state_new->dc_state;
7421 		bool force_disable_dcc = !plane_state->dcc.enable;
7422 
7423 		fill_plane_buffer_attributes(
7424 			adev, afb, plane_state->format, plane_state->rotation,
7425 			afb->tiling_flags,
7426 			&plane_state->tiling_info, &plane_state->plane_size,
7427 			&plane_state->dcc, &plane_state->address,
7428 			afb->tmz_surface, force_disable_dcc);
7429 	}
7430 
7431 	return 0;
7432 }
7433 
7434 static void dm_plane_helper_cleanup_fb(struct drm_plane *plane,
7435 				       struct drm_plane_state *old_state)
7436 {
7437 	struct amdgpu_bo *rbo;
7438 	int r;
7439 
7440 	if (!old_state->fb)
7441 		return;
7442 
7443 	rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]);
7444 	r = amdgpu_bo_reserve(rbo, false);
7445 	if (unlikely(r)) {
7446 		DRM_ERROR("failed to reserve rbo before unpin\n");
7447 		return;
7448 	}
7449 
7450 	amdgpu_bo_unpin(rbo);
7451 	amdgpu_bo_unreserve(rbo);
7452 	amdgpu_bo_unref(&rbo);
7453 }
7454 
7455 static int dm_plane_helper_check_state(struct drm_plane_state *state,
7456 				       struct drm_crtc_state *new_crtc_state)
7457 {
7458 	struct drm_framebuffer *fb = state->fb;
7459 	int min_downscale, max_upscale;
7460 	int min_scale = 0;
7461 	int max_scale = INT_MAX;
7462 
7463 	/* Plane enabled? Validate viewport and get scaling factors from plane caps. */
7464 	if (fb && state->crtc) {
7465 		/* Validate viewport to cover the case when only the position changes */
7466 		if (state->plane->type != DRM_PLANE_TYPE_CURSOR) {
7467 			int viewport_width = state->crtc_w;
7468 			int viewport_height = state->crtc_h;
7469 
7470 			if (state->crtc_x < 0)
7471 				viewport_width += state->crtc_x;
7472 			else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay)
7473 				viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x;
7474 
7475 			if (state->crtc_y < 0)
7476 				viewport_height += state->crtc_y;
7477 			else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay)
7478 				viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y;
7479 
7480 			if (viewport_width < 0 || viewport_height < 0) {
7481 				DRM_DEBUG_ATOMIC("Plane completely outside of screen\n");
7482 				return -EINVAL;
7483 			} else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */
7484 				DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2);
7485 				return -EINVAL;
7486 			} else if (viewport_height < MIN_VIEWPORT_SIZE) {
7487 				DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE);
7488 				return -EINVAL;
7489 			}
7490 
7491 		}
7492 
7493 		/* Get min/max allowed scaling factors from plane caps. */
7494 		get_min_max_dc_plane_scaling(state->crtc->dev, fb,
7495 					     &min_downscale, &max_upscale);
7496 		/*
7497 		 * Convert to drm convention: 16.16 fixed point, instead of dc's
7498 		 * 1.0 == 1000. Also drm scaling is src/dst instead of dc's
7499 		 * dst/src, so min_scale = 1.0 / max_upscale, etc.
7500 		 */
7501 		min_scale = (1000 << 16) / max_upscale;
7502 		max_scale = (1000 << 16) / min_downscale;
7503 	}
7504 
7505 	return drm_atomic_helper_check_plane_state(
7506 		state, new_crtc_state, min_scale, max_scale, true, true);
7507 }
7508 
7509 static int dm_plane_atomic_check(struct drm_plane *plane,
7510 				 struct drm_atomic_state *state)
7511 {
7512 	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
7513 										 plane);
7514 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
7515 	struct dc *dc = adev->dm.dc;
7516 	struct dm_plane_state *dm_plane_state;
7517 	struct dc_scaling_info scaling_info;
7518 	struct drm_crtc_state *new_crtc_state;
7519 	int ret;
7520 
7521 	trace_amdgpu_dm_plane_atomic_check(new_plane_state);
7522 
7523 	dm_plane_state = to_dm_plane_state(new_plane_state);
7524 
7525 	if (!dm_plane_state->dc_state)
7526 		return 0;
7527 
7528 	new_crtc_state =
7529 		drm_atomic_get_new_crtc_state(state,
7530 					      new_plane_state->crtc);
7531 	if (!new_crtc_state)
7532 		return -EINVAL;
7533 
7534 	ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
7535 	if (ret)
7536 		return ret;
7537 
7538 	ret = fill_dc_scaling_info(new_plane_state, &scaling_info);
7539 	if (ret)
7540 		return ret;
7541 
7542 	if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK)
7543 		return 0;
7544 
7545 	return -EINVAL;
7546 }
7547 
7548 static int dm_plane_atomic_async_check(struct drm_plane *plane,
7549 				       struct drm_atomic_state *state)
7550 {
7551 	/* Only support async updates on cursor planes. */
7552 	if (plane->type != DRM_PLANE_TYPE_CURSOR)
7553 		return -EINVAL;
7554 
7555 	return 0;
7556 }
7557 
7558 static void dm_plane_atomic_async_update(struct drm_plane *plane,
7559 					 struct drm_atomic_state *state)
7560 {
7561 	struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state,
7562 									   plane);
7563 	struct drm_plane_state *old_state =
7564 		drm_atomic_get_old_plane_state(state, plane);
7565 
7566 	trace_amdgpu_dm_atomic_update_cursor(new_state);
7567 
7568 	swap(plane->state->fb, new_state->fb);
7569 
7570 	plane->state->src_x = new_state->src_x;
7571 	plane->state->src_y = new_state->src_y;
7572 	plane->state->src_w = new_state->src_w;
7573 	plane->state->src_h = new_state->src_h;
7574 	plane->state->crtc_x = new_state->crtc_x;
7575 	plane->state->crtc_y = new_state->crtc_y;
7576 	plane->state->crtc_w = new_state->crtc_w;
7577 	plane->state->crtc_h = new_state->crtc_h;
7578 
7579 	handle_cursor_update(plane, old_state);
7580 }
7581 
7582 static const struct drm_plane_helper_funcs dm_plane_helper_funcs = {
7583 	.prepare_fb = dm_plane_helper_prepare_fb,
7584 	.cleanup_fb = dm_plane_helper_cleanup_fb,
7585 	.atomic_check = dm_plane_atomic_check,
7586 	.atomic_async_check = dm_plane_atomic_async_check,
7587 	.atomic_async_update = dm_plane_atomic_async_update
7588 };
7589 
7590 /*
7591  * TODO: these are currently initialized to rgb formats only.
7592  * For future use cases we should either initialize them dynamically based on
7593  * plane capabilities, or initialize this array to all formats, so internal drm
7594  * check will succeed, and let DC implement proper check
7595  */
7596 static const uint32_t rgb_formats[] = {
7597 	DRM_FORMAT_XRGB8888,
7598 	DRM_FORMAT_ARGB8888,
7599 	DRM_FORMAT_RGBA8888,
7600 	DRM_FORMAT_XRGB2101010,
7601 	DRM_FORMAT_XBGR2101010,
7602 	DRM_FORMAT_ARGB2101010,
7603 	DRM_FORMAT_ABGR2101010,
7604 	DRM_FORMAT_XRGB16161616,
7605 	DRM_FORMAT_XBGR16161616,
7606 	DRM_FORMAT_ARGB16161616,
7607 	DRM_FORMAT_ABGR16161616,
7608 	DRM_FORMAT_XBGR8888,
7609 	DRM_FORMAT_ABGR8888,
7610 	DRM_FORMAT_RGB565,
7611 };
7612 
7613 static const uint32_t overlay_formats[] = {
7614 	DRM_FORMAT_XRGB8888,
7615 	DRM_FORMAT_ARGB8888,
7616 	DRM_FORMAT_RGBA8888,
7617 	DRM_FORMAT_XBGR8888,
7618 	DRM_FORMAT_ABGR8888,
7619 	DRM_FORMAT_RGB565
7620 };
7621 
7622 static const u32 cursor_formats[] = {
7623 	DRM_FORMAT_ARGB8888
7624 };
7625 
7626 static int get_plane_formats(const struct drm_plane *plane,
7627 			     const struct dc_plane_cap *plane_cap,
7628 			     uint32_t *formats, int max_formats)
7629 {
7630 	int i, num_formats = 0;
7631 
7632 	/*
7633 	 * TODO: Query support for each group of formats directly from
7634 	 * DC plane caps. This will require adding more formats to the
7635 	 * caps list.
7636 	 */
7637 
7638 	switch (plane->type) {
7639 	case DRM_PLANE_TYPE_PRIMARY:
7640 		for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) {
7641 			if (num_formats >= max_formats)
7642 				break;
7643 
7644 			formats[num_formats++] = rgb_formats[i];
7645 		}
7646 
7647 		if (plane_cap && plane_cap->pixel_format_support.nv12)
7648 			formats[num_formats++] = DRM_FORMAT_NV12;
7649 		if (plane_cap && plane_cap->pixel_format_support.p010)
7650 			formats[num_formats++] = DRM_FORMAT_P010;
7651 		if (plane_cap && plane_cap->pixel_format_support.fp16) {
7652 			formats[num_formats++] = DRM_FORMAT_XRGB16161616F;
7653 			formats[num_formats++] = DRM_FORMAT_ARGB16161616F;
7654 			formats[num_formats++] = DRM_FORMAT_XBGR16161616F;
7655 			formats[num_formats++] = DRM_FORMAT_ABGR16161616F;
7656 		}
7657 		break;
7658 
7659 	case DRM_PLANE_TYPE_OVERLAY:
7660 		for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) {
7661 			if (num_formats >= max_formats)
7662 				break;
7663 
7664 			formats[num_formats++] = overlay_formats[i];
7665 		}
7666 		break;
7667 
7668 	case DRM_PLANE_TYPE_CURSOR:
7669 		for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) {
7670 			if (num_formats >= max_formats)
7671 				break;
7672 
7673 			formats[num_formats++] = cursor_formats[i];
7674 		}
7675 		break;
7676 	}
7677 
7678 	return num_formats;
7679 }
7680 
7681 static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
7682 				struct drm_plane *plane,
7683 				unsigned long possible_crtcs,
7684 				const struct dc_plane_cap *plane_cap)
7685 {
7686 	uint32_t formats[32];
7687 	int num_formats;
7688 	int res = -EPERM;
7689 	unsigned int supported_rotations;
7690 	uint64_t *modifiers = NULL;
7691 
7692 	num_formats = get_plane_formats(plane, plane_cap, formats,
7693 					ARRAY_SIZE(formats));
7694 
7695 	res = get_plane_modifiers(dm->adev, plane->type, &modifiers);
7696 	if (res)
7697 		return res;
7698 
7699 	res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs,
7700 				       &dm_plane_funcs, formats, num_formats,
7701 				       modifiers, plane->type, NULL);
7702 	kfree(modifiers);
7703 	if (res)
7704 		return res;
7705 
7706 	if (plane->type == DRM_PLANE_TYPE_OVERLAY &&
7707 	    plane_cap && plane_cap->per_pixel_alpha) {
7708 		unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
7709 					  BIT(DRM_MODE_BLEND_PREMULTI);
7710 
7711 		drm_plane_create_alpha_property(plane);
7712 		drm_plane_create_blend_mode_property(plane, blend_caps);
7713 	}
7714 
7715 	if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
7716 	    plane_cap &&
7717 	    (plane_cap->pixel_format_support.nv12 ||
7718 	     plane_cap->pixel_format_support.p010)) {
7719 		/* This only affects YUV formats. */
7720 		drm_plane_create_color_properties(
7721 			plane,
7722 			BIT(DRM_COLOR_YCBCR_BT601) |
7723 			BIT(DRM_COLOR_YCBCR_BT709) |
7724 			BIT(DRM_COLOR_YCBCR_BT2020),
7725 			BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) |
7726 			BIT(DRM_COLOR_YCBCR_FULL_RANGE),
7727 			DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE);
7728 	}
7729 
7730 	supported_rotations =
7731 		DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 |
7732 		DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270;
7733 
7734 	if (dm->adev->asic_type >= CHIP_BONAIRE &&
7735 	    plane->type != DRM_PLANE_TYPE_CURSOR)
7736 		drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0,
7737 						   supported_rotations);
7738 
7739 	drm_plane_helper_add(plane, &dm_plane_helper_funcs);
7740 
7741 	/* Create (reset) the plane state */
7742 	if (plane->funcs->reset)
7743 		plane->funcs->reset(plane);
7744 
7745 	return 0;
7746 }
7747 
7748 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
7749 			       struct drm_plane *plane,
7750 			       uint32_t crtc_index)
7751 {
7752 	struct amdgpu_crtc *acrtc = NULL;
7753 	struct drm_plane *cursor_plane;
7754 
7755 	int res = -ENOMEM;
7756 
7757 	cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL);
7758 	if (!cursor_plane)
7759 		goto fail;
7760 
7761 	cursor_plane->type = DRM_PLANE_TYPE_CURSOR;
7762 	res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL);
7763 
7764 	acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL);
7765 	if (!acrtc)
7766 		goto fail;
7767 
7768 	res = drm_crtc_init_with_planes(
7769 			dm->ddev,
7770 			&acrtc->base,
7771 			plane,
7772 			cursor_plane,
7773 			&amdgpu_dm_crtc_funcs, NULL);
7774 
7775 	if (res)
7776 		goto fail;
7777 
7778 	drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs);
7779 
7780 	/* Create (reset) the plane state */
7781 	if (acrtc->base.funcs->reset)
7782 		acrtc->base.funcs->reset(&acrtc->base);
7783 
7784 	acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size;
7785 	acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size;
7786 
7787 	acrtc->crtc_id = crtc_index;
7788 	acrtc->base.enabled = false;
7789 	acrtc->otg_inst = -1;
7790 
7791 	dm->adev->mode_info.crtcs[crtc_index] = acrtc;
7792 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
7793 				   true, MAX_COLOR_LUT_ENTRIES);
7794 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
7795 
7796 	return 0;
7797 
7798 fail:
7799 	kfree(acrtc);
7800 	kfree(cursor_plane);
7801 	return res;
7802 }
7803 
7804 
7805 static int to_drm_connector_type(enum amd_signal_type st)
7806 {
7807 	switch (st) {
7808 	case SIGNAL_TYPE_HDMI_TYPE_A:
7809 		return DRM_MODE_CONNECTOR_HDMIA;
7810 	case SIGNAL_TYPE_EDP:
7811 		return DRM_MODE_CONNECTOR_eDP;
7812 	case SIGNAL_TYPE_LVDS:
7813 		return DRM_MODE_CONNECTOR_LVDS;
7814 	case SIGNAL_TYPE_RGB:
7815 		return DRM_MODE_CONNECTOR_VGA;
7816 	case SIGNAL_TYPE_DISPLAY_PORT:
7817 	case SIGNAL_TYPE_DISPLAY_PORT_MST:
7818 		return DRM_MODE_CONNECTOR_DisplayPort;
7819 	case SIGNAL_TYPE_DVI_DUAL_LINK:
7820 	case SIGNAL_TYPE_DVI_SINGLE_LINK:
7821 		return DRM_MODE_CONNECTOR_DVID;
7822 	case SIGNAL_TYPE_VIRTUAL:
7823 		return DRM_MODE_CONNECTOR_VIRTUAL;
7824 
7825 	default:
7826 		return DRM_MODE_CONNECTOR_Unknown;
7827 	}
7828 }
7829 
7830 static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector)
7831 {
7832 	struct drm_encoder *encoder;
7833 
7834 	/* There is only one encoder per connector */
7835 	drm_connector_for_each_possible_encoder(connector, encoder)
7836 		return encoder;
7837 
7838 	return NULL;
7839 }
7840 
7841 static void amdgpu_dm_get_native_mode(struct drm_connector *connector)
7842 {
7843 	struct drm_encoder *encoder;
7844 	struct amdgpu_encoder *amdgpu_encoder;
7845 
7846 	encoder = amdgpu_dm_connector_to_encoder(connector);
7847 
7848 	if (encoder == NULL)
7849 		return;
7850 
7851 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7852 
7853 	amdgpu_encoder->native_mode.clock = 0;
7854 
7855 	if (!list_empty(&connector->probed_modes)) {
7856 		struct drm_display_mode *preferred_mode = NULL;
7857 
7858 		list_for_each_entry(preferred_mode,
7859 				    &connector->probed_modes,
7860 				    head) {
7861 			if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED)
7862 				amdgpu_encoder->native_mode = *preferred_mode;
7863 
7864 			break;
7865 		}
7866 
7867 	}
7868 }
7869 
7870 static struct drm_display_mode *
7871 amdgpu_dm_create_common_mode(struct drm_encoder *encoder,
7872 			     char *name,
7873 			     int hdisplay, int vdisplay)
7874 {
7875 	struct drm_device *dev = encoder->dev;
7876 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7877 	struct drm_display_mode *mode = NULL;
7878 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7879 
7880 	mode = drm_mode_duplicate(dev, native_mode);
7881 
7882 	if (mode == NULL)
7883 		return NULL;
7884 
7885 	mode->hdisplay = hdisplay;
7886 	mode->vdisplay = vdisplay;
7887 	mode->type &= ~DRM_MODE_TYPE_PREFERRED;
7888 #ifdef __linux__
7889 	strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7890 #else
7891 	strncpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
7892 #endif
7893 
7894 	return mode;
7895 
7896 }
7897 
7898 static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder,
7899 						 struct drm_connector *connector)
7900 {
7901 	struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
7902 	struct drm_display_mode *mode = NULL;
7903 	struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode;
7904 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7905 				to_amdgpu_dm_connector(connector);
7906 	int i;
7907 	int n;
7908 	struct mode_size {
7909 		char name[DRM_DISPLAY_MODE_LEN];
7910 		int w;
7911 		int h;
7912 	} common_modes[] = {
7913 		{  "640x480",  640,  480},
7914 		{  "800x600",  800,  600},
7915 		{ "1024x768", 1024,  768},
7916 		{ "1280x720", 1280,  720},
7917 		{ "1280x800", 1280,  800},
7918 		{"1280x1024", 1280, 1024},
7919 		{ "1440x900", 1440,  900},
7920 		{"1680x1050", 1680, 1050},
7921 		{"1600x1200", 1600, 1200},
7922 		{"1920x1080", 1920, 1080},
7923 		{"1920x1200", 1920, 1200}
7924 	};
7925 
7926 	n = ARRAY_SIZE(common_modes);
7927 
7928 	for (i = 0; i < n; i++) {
7929 		struct drm_display_mode *curmode = NULL;
7930 		bool mode_existed = false;
7931 
7932 		if (common_modes[i].w > native_mode->hdisplay ||
7933 		    common_modes[i].h > native_mode->vdisplay ||
7934 		   (common_modes[i].w == native_mode->hdisplay &&
7935 		    common_modes[i].h == native_mode->vdisplay))
7936 			continue;
7937 
7938 		list_for_each_entry(curmode, &connector->probed_modes, head) {
7939 			if (common_modes[i].w == curmode->hdisplay &&
7940 			    common_modes[i].h == curmode->vdisplay) {
7941 				mode_existed = true;
7942 				break;
7943 			}
7944 		}
7945 
7946 		if (mode_existed)
7947 			continue;
7948 
7949 		mode = amdgpu_dm_create_common_mode(encoder,
7950 				common_modes[i].name, common_modes[i].w,
7951 				common_modes[i].h);
7952 		if (!mode)
7953 			continue;
7954 
7955 		drm_mode_probed_add(connector, mode);
7956 		amdgpu_dm_connector->num_modes++;
7957 	}
7958 }
7959 
7960 static void amdgpu_set_panel_orientation(struct drm_connector *connector)
7961 {
7962 	struct drm_encoder *encoder;
7963 	struct amdgpu_encoder *amdgpu_encoder;
7964 	const struct drm_display_mode *native_mode;
7965 
7966 	if (connector->connector_type != DRM_MODE_CONNECTOR_eDP &&
7967 	    connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
7968 		return;
7969 
7970 	encoder = amdgpu_dm_connector_to_encoder(connector);
7971 	if (!encoder)
7972 		return;
7973 
7974 	amdgpu_encoder = to_amdgpu_encoder(encoder);
7975 
7976 	native_mode = &amdgpu_encoder->native_mode;
7977 	if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0)
7978 		return;
7979 
7980 	drm_connector_set_panel_orientation_with_quirk(connector,
7981 						       DRM_MODE_PANEL_ORIENTATION_UNKNOWN,
7982 						       native_mode->hdisplay,
7983 						       native_mode->vdisplay);
7984 }
7985 
7986 static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector,
7987 					      struct edid *edid)
7988 {
7989 	struct amdgpu_dm_connector *amdgpu_dm_connector =
7990 			to_amdgpu_dm_connector(connector);
7991 
7992 	if (edid) {
7993 		/* empty probed_modes */
7994 		INIT_LIST_HEAD(&connector->probed_modes);
7995 		amdgpu_dm_connector->num_modes =
7996 				drm_add_edid_modes(connector, edid);
7997 
7998 		/* sorting the probed modes before calling function
7999 		 * amdgpu_dm_get_native_mode() since EDID can have
8000 		 * more than one preferred mode. The modes that are
8001 		 * later in the probed mode list could be of higher
8002 		 * and preferred resolution. For example, 3840x2160
8003 		 * resolution in base EDID preferred timing and 4096x2160
8004 		 * preferred resolution in DID extension block later.
8005 		 */
8006 		drm_mode_sort(&connector->probed_modes);
8007 		amdgpu_dm_get_native_mode(connector);
8008 
8009 		/* Freesync capabilities are reset by calling
8010 		 * drm_add_edid_modes() and need to be
8011 		 * restored here.
8012 		 */
8013 		amdgpu_dm_update_freesync_caps(connector, edid);
8014 
8015 		amdgpu_set_panel_orientation(connector);
8016 	} else {
8017 		amdgpu_dm_connector->num_modes = 0;
8018 	}
8019 }
8020 
8021 static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector,
8022 			      struct drm_display_mode *mode)
8023 {
8024 	struct drm_display_mode *m;
8025 
8026 	list_for_each_entry (m, &aconnector->base.probed_modes, head) {
8027 		if (drm_mode_equal(m, mode))
8028 			return true;
8029 	}
8030 
8031 	return false;
8032 }
8033 
8034 static uint add_fs_modes(struct amdgpu_dm_connector *aconnector)
8035 {
8036 	const struct drm_display_mode *m;
8037 	struct drm_display_mode *new_mode;
8038 	uint i;
8039 	uint32_t new_modes_count = 0;
8040 
8041 	/* Standard FPS values
8042 	 *
8043 	 * 23.976   - TV/NTSC
8044 	 * 24 	    - Cinema
8045 	 * 25 	    - TV/PAL
8046 	 * 29.97    - TV/NTSC
8047 	 * 30 	    - TV/NTSC
8048 	 * 48 	    - Cinema HFR
8049 	 * 50 	    - TV/PAL
8050 	 * 60 	    - Commonly used
8051 	 * 48,72,96 - Multiples of 24
8052 	 */
8053 	static const uint32_t common_rates[] = {
8054 		23976, 24000, 25000, 29970, 30000,
8055 		48000, 50000, 60000, 72000, 96000
8056 	};
8057 
8058 	/*
8059 	 * Find mode with highest refresh rate with the same resolution
8060 	 * as the preferred mode. Some monitors report a preferred mode
8061 	 * with lower resolution than the highest refresh rate supported.
8062 	 */
8063 
8064 	m = get_highest_refresh_rate_mode(aconnector, true);
8065 	if (!m)
8066 		return 0;
8067 
8068 	for (i = 0; i < ARRAY_SIZE(common_rates); i++) {
8069 		uint64_t target_vtotal, target_vtotal_diff;
8070 		uint64_t num, den;
8071 
8072 		if (drm_mode_vrefresh(m) * 1000 < common_rates[i])
8073 			continue;
8074 
8075 		if (common_rates[i] < aconnector->min_vfreq * 1000 ||
8076 		    common_rates[i] > aconnector->max_vfreq * 1000)
8077 			continue;
8078 
8079 		num = (unsigned long long)m->clock * 1000 * 1000;
8080 		den = common_rates[i] * (unsigned long long)m->htotal;
8081 		target_vtotal = div_u64(num, den);
8082 		target_vtotal_diff = target_vtotal - m->vtotal;
8083 
8084 		/* Check for illegal modes */
8085 		if (m->vsync_start + target_vtotal_diff < m->vdisplay ||
8086 		    m->vsync_end + target_vtotal_diff < m->vsync_start ||
8087 		    m->vtotal + target_vtotal_diff < m->vsync_end)
8088 			continue;
8089 
8090 		new_mode = drm_mode_duplicate(aconnector->base.dev, m);
8091 		if (!new_mode)
8092 			goto out;
8093 
8094 		new_mode->vtotal += (u16)target_vtotal_diff;
8095 		new_mode->vsync_start += (u16)target_vtotal_diff;
8096 		new_mode->vsync_end += (u16)target_vtotal_diff;
8097 		new_mode->type &= ~DRM_MODE_TYPE_PREFERRED;
8098 		new_mode->type |= DRM_MODE_TYPE_DRIVER;
8099 
8100 		if (!is_duplicate_mode(aconnector, new_mode)) {
8101 			drm_mode_probed_add(&aconnector->base, new_mode);
8102 			new_modes_count += 1;
8103 		} else
8104 			drm_mode_destroy(aconnector->base.dev, new_mode);
8105 	}
8106  out:
8107 	return new_modes_count;
8108 }
8109 
8110 static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector,
8111 						   struct edid *edid)
8112 {
8113 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8114 		to_amdgpu_dm_connector(connector);
8115 
8116 	if (!(amdgpu_freesync_vid_mode && edid))
8117 		return;
8118 
8119 	if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
8120 		amdgpu_dm_connector->num_modes +=
8121 			add_fs_modes(amdgpu_dm_connector);
8122 }
8123 
8124 static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
8125 {
8126 	struct amdgpu_dm_connector *amdgpu_dm_connector =
8127 			to_amdgpu_dm_connector(connector);
8128 	struct drm_encoder *encoder;
8129 	struct edid *edid = amdgpu_dm_connector->edid;
8130 
8131 	encoder = amdgpu_dm_connector_to_encoder(connector);
8132 
8133 	if (!drm_edid_is_valid(edid)) {
8134 		amdgpu_dm_connector->num_modes =
8135 				drm_add_modes_noedid(connector, 640, 480);
8136 	} else {
8137 		amdgpu_dm_connector_ddc_get_modes(connector, edid);
8138 		amdgpu_dm_connector_add_common_modes(encoder, connector);
8139 		amdgpu_dm_connector_add_freesync_modes(connector, edid);
8140 	}
8141 	amdgpu_dm_fbc_init(connector);
8142 
8143 	return amdgpu_dm_connector->num_modes;
8144 }
8145 
8146 void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
8147 				     struct amdgpu_dm_connector *aconnector,
8148 				     int connector_type,
8149 				     struct dc_link *link,
8150 				     int link_index)
8151 {
8152 	struct amdgpu_device *adev = drm_to_adev(dm->ddev);
8153 
8154 	/*
8155 	 * Some of the properties below require access to state, like bpc.
8156 	 * Allocate some default initial connector state with our reset helper.
8157 	 */
8158 	if (aconnector->base.funcs->reset)
8159 		aconnector->base.funcs->reset(&aconnector->base);
8160 
8161 	aconnector->connector_id = link_index;
8162 	aconnector->dc_link = link;
8163 	aconnector->base.interlace_allowed = false;
8164 	aconnector->base.doublescan_allowed = false;
8165 	aconnector->base.stereo_allowed = false;
8166 	aconnector->base.dpms = DRM_MODE_DPMS_OFF;
8167 	aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
8168 	aconnector->audio_inst = -1;
8169 	rw_init(&aconnector->hpd_lock, "dmhpd");
8170 
8171 	/*
8172 	 * configure support HPD hot plug connector_>polled default value is 0
8173 	 * which means HPD hot plug not supported
8174 	 */
8175 	switch (connector_type) {
8176 	case DRM_MODE_CONNECTOR_HDMIA:
8177 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8178 		aconnector->base.ycbcr_420_allowed =
8179 			link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
8180 		break;
8181 	case DRM_MODE_CONNECTOR_DisplayPort:
8182 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8183 		aconnector->base.ycbcr_420_allowed =
8184 			link->link_enc->features.dp_ycbcr420_supported ? true : false;
8185 		break;
8186 	case DRM_MODE_CONNECTOR_DVID:
8187 		aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
8188 		break;
8189 	default:
8190 		break;
8191 	}
8192 
8193 	drm_object_attach_property(&aconnector->base.base,
8194 				dm->ddev->mode_config.scaling_mode_property,
8195 				DRM_MODE_SCALE_NONE);
8196 
8197 	drm_object_attach_property(&aconnector->base.base,
8198 				adev->mode_info.underscan_property,
8199 				UNDERSCAN_OFF);
8200 	drm_object_attach_property(&aconnector->base.base,
8201 				adev->mode_info.underscan_hborder_property,
8202 				0);
8203 	drm_object_attach_property(&aconnector->base.base,
8204 				adev->mode_info.underscan_vborder_property,
8205 				0);
8206 
8207 	if (!aconnector->mst_port)
8208 		drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16);
8209 
8210 	/* This defaults to the max in the range, but we want 8bpc for non-edp. */
8211 	aconnector->base.state->max_bpc = (connector_type == DRM_MODE_CONNECTOR_eDP) ? 16 : 8;
8212 	aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc;
8213 
8214 	if (connector_type == DRM_MODE_CONNECTOR_eDP &&
8215 	    (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) {
8216 		drm_object_attach_property(&aconnector->base.base,
8217 				adev->mode_info.abm_level_property, 0);
8218 	}
8219 
8220 	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
8221 	    connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
8222 	    connector_type == DRM_MODE_CONNECTOR_eDP) {
8223 		drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
8224 
8225 		if (!aconnector->mst_port)
8226 			drm_connector_attach_vrr_capable_property(&aconnector->base);
8227 
8228 #ifdef CONFIG_DRM_AMD_DC_HDCP
8229 		if (adev->dm.hdcp_workqueue)
8230 			drm_connector_attach_content_protection_property(&aconnector->base, true);
8231 #endif
8232 	}
8233 }
8234 
8235 static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
8236 			      struct i2c_msg *msgs, int num)
8237 {
8238 	struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap);
8239 	struct ddc_service *ddc_service = i2c->ddc_service;
8240 	struct i2c_command cmd;
8241 	int i;
8242 	int result = -EIO;
8243 
8244 	cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
8245 
8246 	if (!cmd.payloads)
8247 		return result;
8248 
8249 	cmd.number_of_payloads = num;
8250 	cmd.engine = I2C_COMMAND_ENGINE_DEFAULT;
8251 	cmd.speed = 100;
8252 
8253 	for (i = 0; i < num; i++) {
8254 		cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD);
8255 		cmd.payloads[i].address = msgs[i].addr;
8256 		cmd.payloads[i].length = msgs[i].len;
8257 		cmd.payloads[i].data = msgs[i].buf;
8258 	}
8259 
8260 	if (dc_submit_i2c(
8261 			ddc_service->ctx->dc,
8262 			ddc_service->ddc_pin->hw_info.ddc_channel,
8263 			&cmd))
8264 		result = num;
8265 
8266 	kfree(cmd.payloads);
8267 	return result;
8268 }
8269 
8270 static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap)
8271 {
8272 	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
8273 }
8274 
8275 static const struct i2c_algorithm amdgpu_dm_i2c_algo = {
8276 	.master_xfer = amdgpu_dm_i2c_xfer,
8277 	.functionality = amdgpu_dm_i2c_func,
8278 };
8279 
8280 static struct amdgpu_i2c_adapter *
8281 create_i2c(struct ddc_service *ddc_service,
8282 	   int link_index,
8283 	   int *res)
8284 {
8285 	struct amdgpu_device *adev = ddc_service->ctx->driver_context;
8286 	struct amdgpu_i2c_adapter *i2c;
8287 
8288 	i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL);
8289 	if (!i2c)
8290 		return NULL;
8291 #ifdef notyet
8292 	i2c->base.owner = THIS_MODULE;
8293 	i2c->base.class = I2C_CLASS_DDC;
8294 	i2c->base.dev.parent = &adev->pdev->dev;
8295 #endif
8296 	i2c->base.algo = &amdgpu_dm_i2c_algo;
8297 	snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index);
8298 	i2c_set_adapdata(&i2c->base, i2c);
8299 	i2c->ddc_service = ddc_service;
8300 	i2c->ddc_service->ddc_pin->hw_info.ddc_channel = link_index;
8301 
8302 	return i2c;
8303 }
8304 
8305 
8306 /*
8307  * Note: this function assumes that dc_link_detect() was called for the
8308  * dc_link which will be represented by this aconnector.
8309  */
8310 static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
8311 				    struct amdgpu_dm_connector *aconnector,
8312 				    uint32_t link_index,
8313 				    struct amdgpu_encoder *aencoder)
8314 {
8315 	int res = 0;
8316 	int connector_type;
8317 	struct dc *dc = dm->dc;
8318 	struct dc_link *link = dc_get_link_at_index(dc, link_index);
8319 	struct amdgpu_i2c_adapter *i2c;
8320 
8321 	link->priv = aconnector;
8322 
8323 	DRM_DEBUG_DRIVER("%s()\n", __func__);
8324 
8325 	i2c = create_i2c(link->ddc, link->link_index, &res);
8326 	if (!i2c) {
8327 		DRM_ERROR("Failed to create i2c adapter data\n");
8328 		return -ENOMEM;
8329 	}
8330 
8331 	aconnector->i2c = i2c;
8332 	res = i2c_add_adapter(&i2c->base);
8333 
8334 	if (res) {
8335 		DRM_ERROR("Failed to register hw i2c %d\n", link->link_index);
8336 		goto out_free;
8337 	}
8338 
8339 	connector_type = to_drm_connector_type(link->connector_signal);
8340 
8341 	res = drm_connector_init_with_ddc(
8342 			dm->ddev,
8343 			&aconnector->base,
8344 			&amdgpu_dm_connector_funcs,
8345 			connector_type,
8346 			&i2c->base);
8347 
8348 	if (res) {
8349 		DRM_ERROR("connector_init failed\n");
8350 		aconnector->connector_id = -1;
8351 		goto out_free;
8352 	}
8353 
8354 	drm_connector_helper_add(
8355 			&aconnector->base,
8356 			&amdgpu_dm_connector_helper_funcs);
8357 
8358 	amdgpu_dm_connector_init_helper(
8359 		dm,
8360 		aconnector,
8361 		connector_type,
8362 		link,
8363 		link_index);
8364 
8365 	drm_connector_attach_encoder(
8366 		&aconnector->base, &aencoder->base);
8367 
8368 	if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
8369 		|| connector_type == DRM_MODE_CONNECTOR_eDP)
8370 		amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index);
8371 
8372 out_free:
8373 	if (res) {
8374 		kfree(i2c);
8375 		aconnector->i2c = NULL;
8376 	}
8377 	return res;
8378 }
8379 
8380 int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev)
8381 {
8382 	switch (adev->mode_info.num_crtc) {
8383 	case 1:
8384 		return 0x1;
8385 	case 2:
8386 		return 0x3;
8387 	case 3:
8388 		return 0x7;
8389 	case 4:
8390 		return 0xf;
8391 	case 5:
8392 		return 0x1f;
8393 	case 6:
8394 	default:
8395 		return 0x3f;
8396 	}
8397 }
8398 
8399 static int amdgpu_dm_encoder_init(struct drm_device *dev,
8400 				  struct amdgpu_encoder *aencoder,
8401 				  uint32_t link_index)
8402 {
8403 	struct amdgpu_device *adev = drm_to_adev(dev);
8404 
8405 	int res = drm_encoder_init(dev,
8406 				   &aencoder->base,
8407 				   &amdgpu_dm_encoder_funcs,
8408 				   DRM_MODE_ENCODER_TMDS,
8409 				   NULL);
8410 
8411 	aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
8412 
8413 	if (!res)
8414 		aencoder->encoder_id = link_index;
8415 	else
8416 		aencoder->encoder_id = -1;
8417 
8418 	drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs);
8419 
8420 	return res;
8421 }
8422 
8423 static void manage_dm_interrupts(struct amdgpu_device *adev,
8424 				 struct amdgpu_crtc *acrtc,
8425 				 bool enable)
8426 {
8427 	/*
8428 	 * We have no guarantee that the frontend index maps to the same
8429 	 * backend index - some even map to more than one.
8430 	 *
8431 	 * TODO: Use a different interrupt or check DC itself for the mapping.
8432 	 */
8433 	int irq_type =
8434 		amdgpu_display_crtc_idx_to_irq_type(
8435 			adev,
8436 			acrtc->crtc_id);
8437 
8438 	if (enable) {
8439 		drm_crtc_vblank_on(&acrtc->base);
8440 		amdgpu_irq_get(
8441 			adev,
8442 			&adev->pageflip_irq,
8443 			irq_type);
8444 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8445 		amdgpu_irq_get(
8446 			adev,
8447 			&adev->vline0_irq,
8448 			irq_type);
8449 #endif
8450 	} else {
8451 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
8452 		amdgpu_irq_put(
8453 			adev,
8454 			&adev->vline0_irq,
8455 			irq_type);
8456 #endif
8457 		amdgpu_irq_put(
8458 			adev,
8459 			&adev->pageflip_irq,
8460 			irq_type);
8461 		drm_crtc_vblank_off(&acrtc->base);
8462 	}
8463 }
8464 
8465 static void dm_update_pflip_irq_state(struct amdgpu_device *adev,
8466 				      struct amdgpu_crtc *acrtc)
8467 {
8468 	int irq_type =
8469 		amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id);
8470 
8471 	/**
8472 	 * This reads the current state for the IRQ and force reapplies
8473 	 * the setting to hardware.
8474 	 */
8475 	amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type);
8476 }
8477 
8478 static bool
8479 is_scaling_state_different(const struct dm_connector_state *dm_state,
8480 			   const struct dm_connector_state *old_dm_state)
8481 {
8482 	if (dm_state->scaling != old_dm_state->scaling)
8483 		return true;
8484 	if (!dm_state->underscan_enable && old_dm_state->underscan_enable) {
8485 		if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0)
8486 			return true;
8487 	} else  if (dm_state->underscan_enable && !old_dm_state->underscan_enable) {
8488 		if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0)
8489 			return true;
8490 	} else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder ||
8491 		   dm_state->underscan_vborder != old_dm_state->underscan_vborder)
8492 		return true;
8493 	return false;
8494 }
8495 
8496 #ifdef CONFIG_DRM_AMD_DC_HDCP
8497 static bool is_content_protection_different(struct drm_connector_state *state,
8498 					    const struct drm_connector_state *old_state,
8499 					    const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w)
8500 {
8501 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
8502 	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
8503 
8504 	/* Handle: Type0/1 change */
8505 	if (old_state->hdcp_content_type != state->hdcp_content_type &&
8506 	    state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
8507 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8508 		return true;
8509 	}
8510 
8511 	/* CP is being re enabled, ignore this
8512 	 *
8513 	 * Handles:	ENABLED -> DESIRED
8514 	 */
8515 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED &&
8516 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8517 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED;
8518 		return false;
8519 	}
8520 
8521 	/* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED
8522 	 *
8523 	 * Handles:	UNDESIRED -> ENABLED
8524 	 */
8525 	if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED &&
8526 	    state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
8527 		state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
8528 
8529 	/* Stream removed and re-enabled
8530 	 *
8531 	 * Can sometimes overlap with the HPD case,
8532 	 * thus set update_hdcp to false to avoid
8533 	 * setting HDCP multiple times.
8534 	 *
8535 	 * Handles:	DESIRED -> DESIRED (Special case)
8536 	 */
8537 	if (!(old_state->crtc && old_state->crtc->enabled) &&
8538 		state->crtc && state->crtc->enabled &&
8539 		connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) {
8540 		dm_con_state->update_hdcp = false;
8541 		return true;
8542 	}
8543 
8544 	/* Hot-plug, headless s3, dpms
8545 	 *
8546 	 * Only start HDCP if the display is connected/enabled.
8547 	 * update_hdcp flag will be set to false until the next
8548 	 * HPD comes in.
8549 	 *
8550 	 * Handles:	DESIRED -> DESIRED (Special case)
8551 	 */
8552 	if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
8553 	    connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) {
8554 		dm_con_state->update_hdcp = false;
8555 		return true;
8556 	}
8557 
8558 	/*
8559 	 * Handles:	UNDESIRED -> UNDESIRED
8560 	 *		DESIRED -> DESIRED
8561 	 *		ENABLED -> ENABLED
8562 	 */
8563 	if (old_state->content_protection == state->content_protection)
8564 		return false;
8565 
8566 	/*
8567 	 * Handles:	UNDESIRED -> DESIRED
8568 	 *		DESIRED -> UNDESIRED
8569 	 *		ENABLED -> UNDESIRED
8570 	 */
8571 	if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED)
8572 		return true;
8573 
8574 	/*
8575 	 * Handles:	DESIRED -> ENABLED
8576 	 */
8577 	return false;
8578 }
8579 
8580 #endif
8581 static void remove_stream(struct amdgpu_device *adev,
8582 			  struct amdgpu_crtc *acrtc,
8583 			  struct dc_stream_state *stream)
8584 {
8585 	/* this is the update mode case */
8586 
8587 	acrtc->otg_inst = -1;
8588 	acrtc->enabled = false;
8589 }
8590 
8591 static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc,
8592 			       struct dc_cursor_position *position)
8593 {
8594 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8595 	int x, y;
8596 	int xorigin = 0, yorigin = 0;
8597 
8598 	if (!crtc || !plane->state->fb)
8599 		return 0;
8600 
8601 	if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) ||
8602 	    (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) {
8603 		DRM_ERROR("%s: bad cursor width or height %d x %d\n",
8604 			  __func__,
8605 			  plane->state->crtc_w,
8606 			  plane->state->crtc_h);
8607 		return -EINVAL;
8608 	}
8609 
8610 	x = plane->state->crtc_x;
8611 	y = plane->state->crtc_y;
8612 
8613 	if (x <= -amdgpu_crtc->max_cursor_width ||
8614 	    y <= -amdgpu_crtc->max_cursor_height)
8615 		return 0;
8616 
8617 	if (x < 0) {
8618 		xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1);
8619 		x = 0;
8620 	}
8621 	if (y < 0) {
8622 		yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1);
8623 		y = 0;
8624 	}
8625 	position->enable = true;
8626 	position->translate_by_source = true;
8627 	position->x = x;
8628 	position->y = y;
8629 	position->x_hotspot = xorigin;
8630 	position->y_hotspot = yorigin;
8631 
8632 	return 0;
8633 }
8634 
8635 static void handle_cursor_update(struct drm_plane *plane,
8636 				 struct drm_plane_state *old_plane_state)
8637 {
8638 	struct amdgpu_device *adev = drm_to_adev(plane->dev);
8639 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb);
8640 	struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc;
8641 	struct dm_crtc_state *crtc_state = crtc ? to_dm_crtc_state(crtc->state) : NULL;
8642 	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
8643 	uint64_t address = afb ? afb->address : 0;
8644 	struct dc_cursor_position position = {0};
8645 	struct dc_cursor_attributes attributes;
8646 	int ret;
8647 
8648 	if (!plane->state->fb && !old_plane_state->fb)
8649 		return;
8650 
8651 	DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n",
8652 		      __func__,
8653 		      amdgpu_crtc->crtc_id,
8654 		      plane->state->crtc_w,
8655 		      plane->state->crtc_h);
8656 
8657 	ret = get_cursor_position(plane, crtc, &position);
8658 	if (ret)
8659 		return;
8660 
8661 	if (!position.enable) {
8662 		/* turn off cursor */
8663 		if (crtc_state && crtc_state->stream) {
8664 			mutex_lock(&adev->dm.dc_lock);
8665 			dc_stream_set_cursor_position(crtc_state->stream,
8666 						      &position);
8667 			mutex_unlock(&adev->dm.dc_lock);
8668 		}
8669 		return;
8670 	}
8671 
8672 	amdgpu_crtc->cursor_width = plane->state->crtc_w;
8673 	amdgpu_crtc->cursor_height = plane->state->crtc_h;
8674 
8675 	memset(&attributes, 0, sizeof(attributes));
8676 	attributes.address.high_part = upper_32_bits(address);
8677 	attributes.address.low_part  = lower_32_bits(address);
8678 	attributes.width             = plane->state->crtc_w;
8679 	attributes.height            = plane->state->crtc_h;
8680 	attributes.color_format      = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA;
8681 	attributes.rotation_angle    = 0;
8682 	attributes.attribute_flags.value = 0;
8683 
8684 	attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0];
8685 
8686 	if (crtc_state->stream) {
8687 		mutex_lock(&adev->dm.dc_lock);
8688 		if (!dc_stream_set_cursor_attributes(crtc_state->stream,
8689 							 &attributes))
8690 			DRM_ERROR("DC failed to set cursor attributes\n");
8691 
8692 		if (!dc_stream_set_cursor_position(crtc_state->stream,
8693 						   &position))
8694 			DRM_ERROR("DC failed to set cursor position\n");
8695 		mutex_unlock(&adev->dm.dc_lock);
8696 	}
8697 }
8698 
8699 static void prepare_flip_isr(struct amdgpu_crtc *acrtc)
8700 {
8701 
8702 	assert_spin_locked(&acrtc->base.dev->event_lock);
8703 	WARN_ON(acrtc->event);
8704 
8705 	acrtc->event = acrtc->base.state->event;
8706 
8707 	/* Set the flip status */
8708 	acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED;
8709 
8710 	/* Mark this event as consumed */
8711 	acrtc->base.state->event = NULL;
8712 
8713 	DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n",
8714 		     acrtc->crtc_id);
8715 }
8716 
8717 static void update_freesync_state_on_stream(
8718 	struct amdgpu_display_manager *dm,
8719 	struct dm_crtc_state *new_crtc_state,
8720 	struct dc_stream_state *new_stream,
8721 	struct dc_plane_state *surface,
8722 	u32 flip_timestamp_in_us)
8723 {
8724 	struct mod_vrr_params vrr_params;
8725 	struct dc_info_packet vrr_infopacket = {0};
8726 	struct amdgpu_device *adev = dm->adev;
8727 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8728 	unsigned long flags;
8729 	bool pack_sdp_v1_3 = false;
8730 
8731 	if (!new_stream)
8732 		return;
8733 
8734 	/*
8735 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8736 	 * For now it's sufficient to just guard against these conditions.
8737 	 */
8738 
8739 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8740 		return;
8741 
8742 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8743         vrr_params = acrtc->dm_irq_params.vrr_params;
8744 
8745 	if (surface) {
8746 		mod_freesync_handle_preflip(
8747 			dm->freesync_module,
8748 			surface,
8749 			new_stream,
8750 			flip_timestamp_in_us,
8751 			&vrr_params);
8752 
8753 		if (adev->family < AMDGPU_FAMILY_AI &&
8754 		    amdgpu_dm_vrr_active(new_crtc_state)) {
8755 			mod_freesync_handle_v_update(dm->freesync_module,
8756 						     new_stream, &vrr_params);
8757 
8758 			/* Need to call this before the frame ends. */
8759 			dc_stream_adjust_vmin_vmax(dm->dc,
8760 						   new_crtc_state->stream,
8761 						   &vrr_params.adjust);
8762 		}
8763 	}
8764 
8765 	mod_freesync_build_vrr_infopacket(
8766 		dm->freesync_module,
8767 		new_stream,
8768 		&vrr_params,
8769 		PACKET_TYPE_VRR,
8770 		TRANSFER_FUNC_UNKNOWN,
8771 		&vrr_infopacket,
8772 		pack_sdp_v1_3);
8773 
8774 	new_crtc_state->freesync_timing_changed |=
8775 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8776 			&vrr_params.adjust,
8777 			sizeof(vrr_params.adjust)) != 0);
8778 
8779 	new_crtc_state->freesync_vrr_info_changed |=
8780 		(memcmp(&new_crtc_state->vrr_infopacket,
8781 			&vrr_infopacket,
8782 			sizeof(vrr_infopacket)) != 0);
8783 
8784 	acrtc->dm_irq_params.vrr_params = vrr_params;
8785 	new_crtc_state->vrr_infopacket = vrr_infopacket;
8786 
8787 	new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust;
8788 	new_stream->vrr_infopacket = vrr_infopacket;
8789 
8790 	if (new_crtc_state->freesync_vrr_info_changed)
8791 		DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d",
8792 			      new_crtc_state->base.crtc->base.id,
8793 			      (int)new_crtc_state->base.vrr_enabled,
8794 			      (int)vrr_params.state);
8795 
8796 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8797 }
8798 
8799 static void update_stream_irq_parameters(
8800 	struct amdgpu_display_manager *dm,
8801 	struct dm_crtc_state *new_crtc_state)
8802 {
8803 	struct dc_stream_state *new_stream = new_crtc_state->stream;
8804 	struct mod_vrr_params vrr_params;
8805 	struct mod_freesync_config config = new_crtc_state->freesync_config;
8806 	struct amdgpu_device *adev = dm->adev;
8807 	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc);
8808 	unsigned long flags;
8809 
8810 	if (!new_stream)
8811 		return;
8812 
8813 	/*
8814 	 * TODO: Determine why min/max totals and vrefresh can be 0 here.
8815 	 * For now it's sufficient to just guard against these conditions.
8816 	 */
8817 	if (!new_stream->timing.h_total || !new_stream->timing.v_total)
8818 		return;
8819 
8820 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
8821 	vrr_params = acrtc->dm_irq_params.vrr_params;
8822 
8823 	if (new_crtc_state->vrr_supported &&
8824 	    config.min_refresh_in_uhz &&
8825 	    config.max_refresh_in_uhz) {
8826 		/*
8827 		 * if freesync compatible mode was set, config.state will be set
8828 		 * in atomic check
8829 		 */
8830 		if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz &&
8831 		    (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) ||
8832 		     new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) {
8833 			vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz;
8834 			vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz;
8835 			vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz;
8836 			vrr_params.state = VRR_STATE_ACTIVE_FIXED;
8837 		} else {
8838 			config.state = new_crtc_state->base.vrr_enabled ?
8839 						     VRR_STATE_ACTIVE_VARIABLE :
8840 						     VRR_STATE_INACTIVE;
8841 		}
8842 	} else {
8843 		config.state = VRR_STATE_UNSUPPORTED;
8844 	}
8845 
8846 	mod_freesync_build_vrr_params(dm->freesync_module,
8847 				      new_stream,
8848 				      &config, &vrr_params);
8849 
8850 	new_crtc_state->freesync_timing_changed |=
8851 		(memcmp(&acrtc->dm_irq_params.vrr_params.adjust,
8852 			&vrr_params.adjust, sizeof(vrr_params.adjust)) != 0);
8853 
8854 	new_crtc_state->freesync_config = config;
8855 	/* Copy state for access from DM IRQ handler */
8856 	acrtc->dm_irq_params.freesync_config = config;
8857 	acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes;
8858 	acrtc->dm_irq_params.vrr_params = vrr_params;
8859 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
8860 }
8861 
8862 static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state,
8863 					    struct dm_crtc_state *new_state)
8864 {
8865 	bool old_vrr_active = amdgpu_dm_vrr_active(old_state);
8866 	bool new_vrr_active = amdgpu_dm_vrr_active(new_state);
8867 
8868 	if (!old_vrr_active && new_vrr_active) {
8869 		/* Transition VRR inactive -> active:
8870 		 * While VRR is active, we must not disable vblank irq, as a
8871 		 * reenable after disable would compute bogus vblank/pflip
8872 		 * timestamps if it likely happened inside display front-porch.
8873 		 *
8874 		 * We also need vupdate irq for the actual core vblank handling
8875 		 * at end of vblank.
8876 		 */
8877 		dm_set_vupdate_irq(new_state->base.crtc, true);
8878 		drm_crtc_vblank_get(new_state->base.crtc);
8879 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n",
8880 				 __func__, new_state->base.crtc->base.id);
8881 	} else if (old_vrr_active && !new_vrr_active) {
8882 		/* Transition VRR active -> inactive:
8883 		 * Allow vblank irq disable again for fixed refresh rate.
8884 		 */
8885 		dm_set_vupdate_irq(new_state->base.crtc, false);
8886 		drm_crtc_vblank_put(new_state->base.crtc);
8887 		DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n",
8888 				 __func__, new_state->base.crtc->base.id);
8889 	}
8890 }
8891 
8892 static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state)
8893 {
8894 	struct drm_plane *plane;
8895 	struct drm_plane_state *old_plane_state;
8896 	int i;
8897 
8898 	/*
8899 	 * TODO: Make this per-stream so we don't issue redundant updates for
8900 	 * commits with multiple streams.
8901 	 */
8902 	for_each_old_plane_in_state(state, plane, old_plane_state, i)
8903 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8904 			handle_cursor_update(plane, old_plane_state);
8905 }
8906 
8907 static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
8908 				    struct dc_state *dc_state,
8909 				    struct drm_device *dev,
8910 				    struct amdgpu_display_manager *dm,
8911 				    struct drm_crtc *pcrtc,
8912 				    bool wait_for_vblank)
8913 {
8914 	uint32_t i;
8915 	uint64_t timestamp_ns;
8916 	struct drm_plane *plane;
8917 	struct drm_plane_state *old_plane_state, *new_plane_state;
8918 	struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc);
8919 	struct drm_crtc_state *new_pcrtc_state =
8920 			drm_atomic_get_new_crtc_state(state, pcrtc);
8921 	struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state);
8922 	struct dm_crtc_state *dm_old_crtc_state =
8923 			to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc));
8924 	int planes_count = 0, vpos, hpos;
8925 	long r;
8926 	unsigned long flags;
8927 	struct amdgpu_bo *abo;
8928 	uint32_t target_vblank, last_flip_vblank;
8929 	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
8930 	bool pflip_present = false;
8931 	struct {
8932 		struct dc_surface_update surface_updates[MAX_SURFACES];
8933 		struct dc_plane_info plane_infos[MAX_SURFACES];
8934 		struct dc_scaling_info scaling_infos[MAX_SURFACES];
8935 		struct dc_flip_addrs flip_addrs[MAX_SURFACES];
8936 		struct dc_stream_update stream_update;
8937 	} *bundle;
8938 
8939 	bundle = kzalloc(sizeof(*bundle), GFP_KERNEL);
8940 
8941 	if (!bundle) {
8942 		dm_error("Failed to allocate update bundle\n");
8943 		goto cleanup;
8944 	}
8945 
8946 	/*
8947 	 * Disable the cursor first if we're disabling all the planes.
8948 	 * It'll remain on the screen after the planes are re-enabled
8949 	 * if we don't.
8950 	 */
8951 	if (acrtc_state->active_planes == 0)
8952 		amdgpu_dm_commit_cursors(state);
8953 
8954 	/* update planes when needed */
8955 	for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
8956 		struct drm_crtc *crtc = new_plane_state->crtc;
8957 		struct drm_crtc_state *new_crtc_state;
8958 		struct drm_framebuffer *fb = new_plane_state->fb;
8959 		struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb;
8960 		bool plane_needs_flip;
8961 		struct dc_plane_state *dc_plane;
8962 		struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state);
8963 
8964 		/* Cursor plane is handled after stream updates */
8965 		if (plane->type == DRM_PLANE_TYPE_CURSOR)
8966 			continue;
8967 
8968 		if (!fb || !crtc || pcrtc != crtc)
8969 			continue;
8970 
8971 		new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
8972 		if (!new_crtc_state->active)
8973 			continue;
8974 
8975 		dc_plane = dm_new_plane_state->dc_state;
8976 
8977 		bundle->surface_updates[planes_count].surface = dc_plane;
8978 		if (new_pcrtc_state->color_mgmt_changed) {
8979 			bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
8980 			bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func;
8981 			bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix;
8982 		}
8983 
8984 		fill_dc_scaling_info(new_plane_state,
8985 				     &bundle->scaling_infos[planes_count]);
8986 
8987 		bundle->surface_updates[planes_count].scaling_info =
8988 			&bundle->scaling_infos[planes_count];
8989 
8990 		plane_needs_flip = old_plane_state->fb && new_plane_state->fb;
8991 
8992 		pflip_present = pflip_present || plane_needs_flip;
8993 
8994 		if (!plane_needs_flip) {
8995 			planes_count += 1;
8996 			continue;
8997 		}
8998 
8999 		abo = gem_to_amdgpu_bo(fb->obj[0]);
9000 
9001 		/*
9002 		 * Wait for all fences on this FB. Do limited wait to avoid
9003 		 * deadlock during GPU reset when this fence will not signal
9004 		 * but we hold reservation lock for the BO.
9005 		 */
9006 		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
9007 					  msecs_to_jiffies(5000));
9008 		if (unlikely(r <= 0))
9009 			DRM_ERROR("Waiting for fences timed out!");
9010 
9011 		fill_dc_plane_info_and_addr(
9012 			dm->adev, new_plane_state,
9013 			afb->tiling_flags,
9014 			&bundle->plane_infos[planes_count],
9015 			&bundle->flip_addrs[planes_count].address,
9016 			afb->tmz_surface, false);
9017 
9018 		DRM_DEBUG_ATOMIC("plane: id=%d dcc_en=%d\n",
9019 				 new_plane_state->plane->index,
9020 				 bundle->plane_infos[planes_count].dcc.enable);
9021 
9022 		bundle->surface_updates[planes_count].plane_info =
9023 			&bundle->plane_infos[planes_count];
9024 
9025 		/*
9026 		 * Only allow immediate flips for fast updates that don't
9027 		 * change FB pitch, DCC state, rotation or mirroing.
9028 		 */
9029 		bundle->flip_addrs[planes_count].flip_immediate =
9030 			crtc->state->async_flip &&
9031 			acrtc_state->update_type == UPDATE_TYPE_FAST;
9032 
9033 		timestamp_ns = ktime_get_ns();
9034 		bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
9035 		bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
9036 		bundle->surface_updates[planes_count].surface = dc_plane;
9037 
9038 		if (!bundle->surface_updates[planes_count].surface) {
9039 			DRM_ERROR("No surface for CRTC: id=%d\n",
9040 					acrtc_attach->crtc_id);
9041 			continue;
9042 		}
9043 
9044 		if (plane == pcrtc->primary)
9045 			update_freesync_state_on_stream(
9046 				dm,
9047 				acrtc_state,
9048 				acrtc_state->stream,
9049 				dc_plane,
9050 				bundle->flip_addrs[planes_count].flip_timestamp_in_us);
9051 
9052 		DRM_DEBUG_ATOMIC("%s Flipping to hi: 0x%x, low: 0x%x\n",
9053 				 __func__,
9054 				 bundle->flip_addrs[planes_count].address.grph.addr.high_part,
9055 				 bundle->flip_addrs[planes_count].address.grph.addr.low_part);
9056 
9057 		planes_count += 1;
9058 
9059 	}
9060 
9061 	if (pflip_present) {
9062 		if (!vrr_active) {
9063 			/* Use old throttling in non-vrr fixed refresh rate mode
9064 			 * to keep flip scheduling based on target vblank counts
9065 			 * working in a backwards compatible way, e.g., for
9066 			 * clients using the GLX_OML_sync_control extension or
9067 			 * DRI3/Present extension with defined target_msc.
9068 			 */
9069 			last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
9070 		}
9071 		else {
9072 			/* For variable refresh rate mode only:
9073 			 * Get vblank of last completed flip to avoid > 1 vrr
9074 			 * flips per video frame by use of throttling, but allow
9075 			 * flip programming anywhere in the possibly large
9076 			 * variable vrr vblank interval for fine-grained flip
9077 			 * timing control and more opportunity to avoid stutter
9078 			 * on late submission of flips.
9079 			 */
9080 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9081 			last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank;
9082 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9083 		}
9084 
9085 		target_vblank = last_flip_vblank + wait_for_vblank;
9086 
9087 		/*
9088 		 * Wait until we're out of the vertical blank period before the one
9089 		 * targeted by the flip
9090 		 */
9091 		while ((acrtc_attach->enabled &&
9092 			(amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id,
9093 							    0, &vpos, &hpos, NULL,
9094 							    NULL, &pcrtc->hwmode)
9095 			 & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
9096 			(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
9097 			(int)(target_vblank -
9098 			  amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
9099 			usleep_range(1000, 1100);
9100 		}
9101 
9102 		/**
9103 		 * Prepare the flip event for the pageflip interrupt to handle.
9104 		 *
9105 		 * This only works in the case where we've already turned on the
9106 		 * appropriate hardware blocks (eg. HUBP) so in the transition case
9107 		 * from 0 -> n planes we have to skip a hardware generated event
9108 		 * and rely on sending it from software.
9109 		 */
9110 		if (acrtc_attach->base.state->event &&
9111 		    acrtc_state->active_planes > 0) {
9112 			drm_crtc_vblank_get(pcrtc);
9113 
9114 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9115 
9116 			WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE);
9117 			prepare_flip_isr(acrtc_attach);
9118 
9119 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9120 		}
9121 
9122 		if (acrtc_state->stream) {
9123 			if (acrtc_state->freesync_vrr_info_changed)
9124 				bundle->stream_update.vrr_infopacket =
9125 					&acrtc_state->stream->vrr_infopacket;
9126 		}
9127 	}
9128 
9129 	/* Update the planes if changed or disable if we don't have any. */
9130 	if ((planes_count || acrtc_state->active_planes == 0) &&
9131 		acrtc_state->stream) {
9132 #if defined(CONFIG_DRM_AMD_DC_DCN)
9133 		/*
9134 		 * If PSR or idle optimizations are enabled then flush out
9135 		 * any pending work before hardware programming.
9136 		 */
9137 		if (dm->vblank_control_workqueue)
9138 			flush_workqueue(dm->vblank_control_workqueue);
9139 #endif
9140 
9141 		bundle->stream_update.stream = acrtc_state->stream;
9142 		if (new_pcrtc_state->mode_changed) {
9143 			bundle->stream_update.src = acrtc_state->stream->src;
9144 			bundle->stream_update.dst = acrtc_state->stream->dst;
9145 		}
9146 
9147 		if (new_pcrtc_state->color_mgmt_changed) {
9148 			/*
9149 			 * TODO: This isn't fully correct since we've actually
9150 			 * already modified the stream in place.
9151 			 */
9152 			bundle->stream_update.gamut_remap =
9153 				&acrtc_state->stream->gamut_remap_matrix;
9154 			bundle->stream_update.output_csc_transform =
9155 				&acrtc_state->stream->csc_color_matrix;
9156 			bundle->stream_update.out_transfer_func =
9157 				acrtc_state->stream->out_transfer_func;
9158 		}
9159 
9160 		acrtc_state->stream->abm_level = acrtc_state->abm_level;
9161 		if (acrtc_state->abm_level != dm_old_crtc_state->abm_level)
9162 			bundle->stream_update.abm_level = &acrtc_state->abm_level;
9163 
9164 		/*
9165 		 * If FreeSync state on the stream has changed then we need to
9166 		 * re-adjust the min/max bounds now that DC doesn't handle this
9167 		 * as part of commit.
9168 		 */
9169 		if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) {
9170 			spin_lock_irqsave(&pcrtc->dev->event_lock, flags);
9171 			dc_stream_adjust_vmin_vmax(
9172 				dm->dc, acrtc_state->stream,
9173 				&acrtc_attach->dm_irq_params.vrr_params.adjust);
9174 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
9175 		}
9176 		mutex_lock(&dm->dc_lock);
9177 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9178 				acrtc_state->stream->link->psr_settings.psr_allow_active)
9179 			amdgpu_dm_psr_disable(acrtc_state->stream);
9180 
9181 		dc_commit_updates_for_stream(dm->dc,
9182 						     bundle->surface_updates,
9183 						     planes_count,
9184 						     acrtc_state->stream,
9185 						     &bundle->stream_update,
9186 						     dc_state);
9187 
9188 		/**
9189 		 * Enable or disable the interrupts on the backend.
9190 		 *
9191 		 * Most pipes are put into power gating when unused.
9192 		 *
9193 		 * When power gating is enabled on a pipe we lose the
9194 		 * interrupt enablement state when power gating is disabled.
9195 		 *
9196 		 * So we need to update the IRQ control state in hardware
9197 		 * whenever the pipe turns on (since it could be previously
9198 		 * power gated) or off (since some pipes can't be power gated
9199 		 * on some ASICs).
9200 		 */
9201 		if (dm_old_crtc_state->active_planes != acrtc_state->active_planes)
9202 			dm_update_pflip_irq_state(drm_to_adev(dev),
9203 						  acrtc_attach);
9204 
9205 		if ((acrtc_state->update_type > UPDATE_TYPE_FAST) &&
9206 				acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9207 				!acrtc_state->stream->link->psr_settings.psr_feature_enabled)
9208 			amdgpu_dm_link_setup_psr(acrtc_state->stream);
9209 
9210 		/* Decrement skip count when PSR is enabled and we're doing fast updates. */
9211 		if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
9212 		    acrtc_state->stream->link->psr_settings.psr_feature_enabled) {
9213 			struct amdgpu_dm_connector *aconn =
9214 				(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;
9215 
9216 			if (aconn->psr_skip_count > 0)
9217 				aconn->psr_skip_count--;
9218 
9219 			/* Allow PSR when skip count is 0. */
9220 			acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count;
9221 		} else {
9222 			acrtc_attach->dm_irq_params.allow_psr_entry = false;
9223 		}
9224 
9225 		mutex_unlock(&dm->dc_lock);
9226 	}
9227 
9228 	/*
9229 	 * Update cursor state *after* programming all the planes.
9230 	 * This avoids redundant programming in the case where we're going
9231 	 * to be disabling a single plane - those pipes are being disabled.
9232 	 */
9233 	if (acrtc_state->active_planes)
9234 		amdgpu_dm_commit_cursors(state);
9235 
9236 cleanup:
9237 	kfree(bundle);
9238 }
9239 
9240 static void amdgpu_dm_commit_audio(struct drm_device *dev,
9241 				   struct drm_atomic_state *state)
9242 {
9243 	struct amdgpu_device *adev = drm_to_adev(dev);
9244 	struct amdgpu_dm_connector *aconnector;
9245 	struct drm_connector *connector;
9246 	struct drm_connector_state *old_con_state, *new_con_state;
9247 	struct drm_crtc_state *new_crtc_state;
9248 	struct dm_crtc_state *new_dm_crtc_state;
9249 	const struct dc_stream_status *status;
9250 	int i, inst;
9251 
9252 	/* Notify device removals. */
9253 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9254 		if (old_con_state->crtc != new_con_state->crtc) {
9255 			/* CRTC changes require notification. */
9256 			goto notify;
9257 		}
9258 
9259 		if (!new_con_state->crtc)
9260 			continue;
9261 
9262 		new_crtc_state = drm_atomic_get_new_crtc_state(
9263 			state, new_con_state->crtc);
9264 
9265 		if (!new_crtc_state)
9266 			continue;
9267 
9268 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9269 			continue;
9270 
9271 	notify:
9272 		aconnector = to_amdgpu_dm_connector(connector);
9273 
9274 		mutex_lock(&adev->dm.audio_lock);
9275 		inst = aconnector->audio_inst;
9276 		aconnector->audio_inst = -1;
9277 		mutex_unlock(&adev->dm.audio_lock);
9278 
9279 		amdgpu_dm_audio_eld_notify(adev, inst);
9280 	}
9281 
9282 	/* Notify audio device additions. */
9283 	for_each_new_connector_in_state(state, connector, new_con_state, i) {
9284 		if (!new_con_state->crtc)
9285 			continue;
9286 
9287 		new_crtc_state = drm_atomic_get_new_crtc_state(
9288 			state, new_con_state->crtc);
9289 
9290 		if (!new_crtc_state)
9291 			continue;
9292 
9293 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
9294 			continue;
9295 
9296 		new_dm_crtc_state = to_dm_crtc_state(new_crtc_state);
9297 		if (!new_dm_crtc_state->stream)
9298 			continue;
9299 
9300 		status = dc_stream_get_status(new_dm_crtc_state->stream);
9301 		if (!status)
9302 			continue;
9303 
9304 		aconnector = to_amdgpu_dm_connector(connector);
9305 
9306 		mutex_lock(&adev->dm.audio_lock);
9307 		inst = status->audio_inst;
9308 		aconnector->audio_inst = inst;
9309 		mutex_unlock(&adev->dm.audio_lock);
9310 
9311 		amdgpu_dm_audio_eld_notify(adev, inst);
9312 	}
9313 }
9314 
9315 /*
9316  * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC
9317  * @crtc_state: the DRM CRTC state
9318  * @stream_state: the DC stream state.
9319  *
9320  * Copy the mirrored transient state flags from DRM, to DC. It is used to bring
9321  * a dc_stream_state's flags in sync with a drm_crtc_state's flags.
9322  */
9323 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
9324 						struct dc_stream_state *stream_state)
9325 {
9326 	stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
9327 }
9328 
9329 /**
9330  * amdgpu_dm_atomic_commit_tail() - AMDgpu DM's commit tail implementation.
9331  * @state: The atomic state to commit
9332  *
9333  * This will tell DC to commit the constructed DC state from atomic_check,
9334  * programming the hardware. Any failures here implies a hardware failure, since
9335  * atomic check should have filtered anything non-kosher.
9336  */
9337 static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
9338 {
9339 	struct drm_device *dev = state->dev;
9340 	struct amdgpu_device *adev = drm_to_adev(dev);
9341 	struct amdgpu_display_manager *dm = &adev->dm;
9342 	struct dm_atomic_state *dm_state;
9343 	struct dc_state *dc_state = NULL, *dc_state_temp = NULL;
9344 	uint32_t i, j;
9345 	struct drm_crtc *crtc;
9346 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
9347 	unsigned long flags;
9348 	bool wait_for_vblank = true;
9349 	struct drm_connector *connector;
9350 	struct drm_connector_state *old_con_state, *new_con_state;
9351 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
9352 	int crtc_disable_count = 0;
9353 	bool mode_set_reset_required = false;
9354 
9355 	trace_amdgpu_dm_atomic_commit_tail_begin(state);
9356 
9357 	drm_atomic_helper_update_legacy_modeset_state(dev, state);
9358 
9359 	dm_state = dm_atomic_get_new_state(state);
9360 	if (dm_state && dm_state->context) {
9361 		dc_state = dm_state->context;
9362 	} else {
9363 		/* No state changes, retain current state. */
9364 		dc_state_temp = dc_create_state(dm->dc);
9365 		ASSERT(dc_state_temp);
9366 		dc_state = dc_state_temp;
9367 		dc_resource_state_copy_construct_current(dm->dc, dc_state);
9368 	}
9369 
9370 	for_each_oldnew_crtc_in_state (state, crtc, old_crtc_state,
9371 				       new_crtc_state, i) {
9372 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9373 
9374 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9375 
9376 		if (old_crtc_state->active &&
9377 		    (!new_crtc_state->active ||
9378 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9379 			manage_dm_interrupts(adev, acrtc, false);
9380 			dc_stream_release(dm_old_crtc_state->stream);
9381 		}
9382 	}
9383 
9384 	drm_atomic_helper_calc_timestamping_constants(state);
9385 
9386 	/* update changed items */
9387 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9388 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9389 
9390 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9391 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9392 
9393 		DRM_DEBUG_ATOMIC(
9394 			"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
9395 			"planes_changed:%d, mode_changed:%d,active_changed:%d,"
9396 			"connectors_changed:%d\n",
9397 			acrtc->crtc_id,
9398 			new_crtc_state->enable,
9399 			new_crtc_state->active,
9400 			new_crtc_state->planes_changed,
9401 			new_crtc_state->mode_changed,
9402 			new_crtc_state->active_changed,
9403 			new_crtc_state->connectors_changed);
9404 
9405 		/* Disable cursor if disabling crtc */
9406 		if (old_crtc_state->active && !new_crtc_state->active) {
9407 			struct dc_cursor_position position;
9408 
9409 			memset(&position, 0, sizeof(position));
9410 			mutex_lock(&dm->dc_lock);
9411 			dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
9412 			mutex_unlock(&dm->dc_lock);
9413 		}
9414 
9415 		/* Copy all transient state flags into dc state */
9416 		if (dm_new_crtc_state->stream) {
9417 			amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
9418 							    dm_new_crtc_state->stream);
9419 		}
9420 
9421 		/* handles headless hotplug case, updating new_state and
9422 		 * aconnector as needed
9423 		 */
9424 
9425 		if (modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
9426 
9427 			DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
9428 
9429 			if (!dm_new_crtc_state->stream) {
9430 				/*
9431 				 * this could happen because of issues with
9432 				 * userspace notifications delivery.
9433 				 * In this case userspace tries to set mode on
9434 				 * display which is disconnected in fact.
9435 				 * dc_sink is NULL in this case on aconnector.
9436 				 * We expect reset mode will come soon.
9437 				 *
9438 				 * This can also happen when unplug is done
9439 				 * during resume sequence ended
9440 				 *
9441 				 * In this case, we want to pretend we still
9442 				 * have a sink to keep the pipe running so that
9443 				 * hw state is consistent with the sw state
9444 				 */
9445 				DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
9446 						__func__, acrtc->base.base.id);
9447 				continue;
9448 			}
9449 
9450 			if (dm_old_crtc_state->stream)
9451 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9452 
9453 			pm_runtime_get_noresume(dev->dev);
9454 
9455 			acrtc->enabled = true;
9456 			acrtc->hw_mode = new_crtc_state->mode;
9457 			crtc->hwmode = new_crtc_state->mode;
9458 			mode_set_reset_required = true;
9459 		} else if (modereset_required(new_crtc_state)) {
9460 			DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
9461 			/* i.e. reset mode */
9462 			if (dm_old_crtc_state->stream)
9463 				remove_stream(adev, acrtc, dm_old_crtc_state->stream);
9464 
9465 			mode_set_reset_required = true;
9466 		}
9467 	} /* for_each_crtc_in_state() */
9468 
9469 	if (dc_state) {
9470 		/* if there mode set or reset, disable eDP PSR */
9471 		if (mode_set_reset_required) {
9472 #if defined(CONFIG_DRM_AMD_DC_DCN)
9473 			if (dm->vblank_control_workqueue)
9474 				flush_workqueue(dm->vblank_control_workqueue);
9475 #endif
9476 			amdgpu_dm_psr_disable_all(dm);
9477 		}
9478 
9479 		dm_enable_per_frame_crtc_master_sync(dc_state);
9480 		mutex_lock(&dm->dc_lock);
9481 		WARN_ON(!dc_commit_state(dm->dc, dc_state));
9482 #if defined(CONFIG_DRM_AMD_DC_DCN)
9483                /* Allow idle optimization when vblank count is 0 for display off */
9484                if (dm->active_vblank_irq_count == 0)
9485                    dc_allow_idle_optimizations(dm->dc,true);
9486 #endif
9487 		mutex_unlock(&dm->dc_lock);
9488 	}
9489 
9490 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9491 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9492 
9493 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9494 
9495 		if (dm_new_crtc_state->stream != NULL) {
9496 			const struct dc_stream_status *status =
9497 					dc_stream_get_status(dm_new_crtc_state->stream);
9498 
9499 			if (!status)
9500 				status = dc_stream_get_status_from_state(dc_state,
9501 									 dm_new_crtc_state->stream);
9502 			if (!status)
9503 				DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
9504 			else
9505 				acrtc->otg_inst = status->primary_otg_inst;
9506 		}
9507 	}
9508 #ifdef CONFIG_DRM_AMD_DC_HDCP
9509 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9510 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9511 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9512 		struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9513 
9514 		new_crtc_state = NULL;
9515 
9516 		if (acrtc)
9517 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9518 
9519 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9520 
9521 		if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL &&
9522 		    connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
9523 			hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
9524 			new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
9525 			dm_new_con_state->update_hdcp = true;
9526 			continue;
9527 		}
9528 
9529 		if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue))
9530 			hdcp_update_display(
9531 				adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
9532 				new_con_state->hdcp_content_type,
9533 				new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED);
9534 	}
9535 #endif
9536 
9537 	/* Handle connector state changes */
9538 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
9539 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
9540 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
9541 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
9542 		struct dc_surface_update dummy_updates[MAX_SURFACES];
9543 		struct dc_stream_update stream_update;
9544 		struct dc_info_packet hdr_packet;
9545 		struct dc_stream_status *status = NULL;
9546 		bool abm_changed, hdr_changed, scaling_changed;
9547 
9548 		memset(&dummy_updates, 0, sizeof(dummy_updates));
9549 		memset(&stream_update, 0, sizeof(stream_update));
9550 
9551 		if (acrtc) {
9552 			new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
9553 			old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
9554 		}
9555 
9556 		/* Skip any modesets/resets */
9557 		if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
9558 			continue;
9559 
9560 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9561 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9562 
9563 		scaling_changed = is_scaling_state_different(dm_new_con_state,
9564 							     dm_old_con_state);
9565 
9566 		abm_changed = dm_new_crtc_state->abm_level !=
9567 			      dm_old_crtc_state->abm_level;
9568 
9569 		hdr_changed =
9570 			!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
9571 
9572 		if (!scaling_changed && !abm_changed && !hdr_changed)
9573 			continue;
9574 
9575 		stream_update.stream = dm_new_crtc_state->stream;
9576 		if (scaling_changed) {
9577 			update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
9578 					dm_new_con_state, dm_new_crtc_state->stream);
9579 
9580 			stream_update.src = dm_new_crtc_state->stream->src;
9581 			stream_update.dst = dm_new_crtc_state->stream->dst;
9582 		}
9583 
9584 		if (abm_changed) {
9585 			dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
9586 
9587 			stream_update.abm_level = &dm_new_crtc_state->abm_level;
9588 		}
9589 
9590 		if (hdr_changed) {
9591 			fill_hdr_info_packet(new_con_state, &hdr_packet);
9592 			stream_update.hdr_static_metadata = &hdr_packet;
9593 		}
9594 
9595 		status = dc_stream_get_status(dm_new_crtc_state->stream);
9596 
9597 		if (WARN_ON(!status))
9598 			continue;
9599 
9600 		WARN_ON(!status->plane_count);
9601 
9602 		/*
9603 		 * TODO: DC refuses to perform stream updates without a dc_surface_update.
9604 		 * Here we create an empty update on each plane.
9605 		 * To fix this, DC should permit updating only stream properties.
9606 		 */
9607 		for (j = 0; j < status->plane_count; j++)
9608 			dummy_updates[j].surface = status->plane_states[0];
9609 
9610 
9611 		mutex_lock(&dm->dc_lock);
9612 		dc_commit_updates_for_stream(dm->dc,
9613 						     dummy_updates,
9614 						     status->plane_count,
9615 						     dm_new_crtc_state->stream,
9616 						     &stream_update,
9617 						     dc_state);
9618 		mutex_unlock(&dm->dc_lock);
9619 	}
9620 
9621 	/* Count number of newly disabled CRTCs for dropping PM refs later. */
9622 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
9623 				      new_crtc_state, i) {
9624 		if (old_crtc_state->active && !new_crtc_state->active)
9625 			crtc_disable_count++;
9626 
9627 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9628 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
9629 
9630 		/* For freesync config update on crtc state and params for irq */
9631 		update_stream_irq_parameters(dm, dm_new_crtc_state);
9632 
9633 		/* Handle vrr on->off / off->on transitions */
9634 		amdgpu_dm_handle_vrr_transition(dm_old_crtc_state,
9635 						dm_new_crtc_state);
9636 	}
9637 
9638 	/**
9639 	 * Enable interrupts for CRTCs that are newly enabled or went through
9640 	 * a modeset. It was intentionally deferred until after the front end
9641 	 * state was modified to wait until the OTG was on and so the IRQ
9642 	 * handlers didn't access stale or invalid state.
9643 	 */
9644 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
9645 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
9646 #ifdef CONFIG_DEBUG_FS
9647 		bool configure_crc = false;
9648 		enum amdgpu_dm_pipe_crc_source cur_crc_src;
9649 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9650 		struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk;
9651 #endif
9652 		spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9653 		cur_crc_src = acrtc->dm_irq_params.crc_src;
9654 		spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9655 #endif
9656 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9657 
9658 		if (new_crtc_state->active &&
9659 		    (!old_crtc_state->active ||
9660 		     drm_atomic_crtc_needs_modeset(new_crtc_state))) {
9661 			dc_stream_retain(dm_new_crtc_state->stream);
9662 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
9663 			manage_dm_interrupts(adev, acrtc, true);
9664 
9665 #ifdef CONFIG_DEBUG_FS
9666 			/**
9667 			 * Frontend may have changed so reapply the CRC capture
9668 			 * settings for the stream.
9669 			 */
9670 			dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9671 
9672 			if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) {
9673 				configure_crc = true;
9674 #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
9675 				if (amdgpu_dm_crc_window_is_activated(crtc)) {
9676 					spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9677 					acrtc->dm_irq_params.crc_window.update_win = true;
9678 					acrtc->dm_irq_params.crc_window.skip_frame_cnt = 2;
9679 					spin_lock_irq(&crc_rd_wrk->crc_rd_work_lock);
9680 					crc_rd_wrk->crtc = crtc;
9681 					spin_unlock_irq(&crc_rd_wrk->crc_rd_work_lock);
9682 					spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9683 				}
9684 #endif
9685 			}
9686 
9687 			if (configure_crc)
9688 				if (amdgpu_dm_crtc_configure_crc_source(
9689 					crtc, dm_new_crtc_state, cur_crc_src))
9690 					DRM_DEBUG_DRIVER("Failed to configure crc source");
9691 #endif
9692 		}
9693 	}
9694 
9695 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j)
9696 		if (new_crtc_state->async_flip)
9697 			wait_for_vblank = false;
9698 
9699 	/* update planes when needed per crtc*/
9700 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) {
9701 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
9702 
9703 		if (dm_new_crtc_state->stream)
9704 			amdgpu_dm_commit_planes(state, dc_state, dev,
9705 						dm, crtc, wait_for_vblank);
9706 	}
9707 
9708 	/* Update audio instances for each connector. */
9709 	amdgpu_dm_commit_audio(dev, state);
9710 
9711 #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||		\
9712 	defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
9713 	/* restore the backlight level */
9714 	for (i = 0; i < dm->num_of_edps; i++) {
9715 		if (dm->backlight_dev[i] &&
9716 		    (dm->actual_brightness[i] != dm->brightness[i]))
9717 			amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]);
9718 	}
9719 #endif
9720 	/*
9721 	 * send vblank event on all events not handled in flip and
9722 	 * mark consumed event for drm_atomic_helper_commit_hw_done
9723 	 */
9724 	spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags);
9725 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
9726 
9727 		if (new_crtc_state->event)
9728 			drm_send_event_locked(dev, &new_crtc_state->event->base);
9729 
9730 		new_crtc_state->event = NULL;
9731 	}
9732 	spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags);
9733 
9734 	/* Signal HW programming completion */
9735 	drm_atomic_helper_commit_hw_done(state);
9736 
9737 	if (wait_for_vblank)
9738 		drm_atomic_helper_wait_for_flip_done(dev, state);
9739 
9740 	drm_atomic_helper_cleanup_planes(dev, state);
9741 
9742 	/* return the stolen vga memory back to VRAM */
9743 	if (!adev->mman.keep_stolen_vga_memory)
9744 		amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
9745 	amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
9746 
9747 	/*
9748 	 * Finally, drop a runtime PM reference for each newly disabled CRTC,
9749 	 * so we can put the GPU into runtime suspend if we're not driving any
9750 	 * displays anymore
9751 	 */
9752 	for (i = 0; i < crtc_disable_count; i++)
9753 		pm_runtime_put_autosuspend(dev->dev);
9754 	pm_runtime_mark_last_busy(dev->dev);
9755 
9756 	if (dc_state_temp)
9757 		dc_release_state(dc_state_temp);
9758 }
9759 
9760 
9761 static int dm_force_atomic_commit(struct drm_connector *connector)
9762 {
9763 	int ret = 0;
9764 	struct drm_device *ddev = connector->dev;
9765 	struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
9766 	struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9767 	struct drm_plane *plane = disconnected_acrtc->base.primary;
9768 	struct drm_connector_state *conn_state;
9769 	struct drm_crtc_state *crtc_state;
9770 	struct drm_plane_state *plane_state;
9771 
9772 	if (!state)
9773 		return -ENOMEM;
9774 
9775 	state->acquire_ctx = ddev->mode_config.acquire_ctx;
9776 
9777 	/* Construct an atomic state to restore previous display setting */
9778 
9779 	/*
9780 	 * Attach connectors to drm_atomic_state
9781 	 */
9782 	conn_state = drm_atomic_get_connector_state(state, connector);
9783 
9784 	ret = PTR_ERR_OR_ZERO(conn_state);
9785 	if (ret)
9786 		goto out;
9787 
9788 	/* Attach crtc to drm_atomic_state*/
9789 	crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
9790 
9791 	ret = PTR_ERR_OR_ZERO(crtc_state);
9792 	if (ret)
9793 		goto out;
9794 
9795 	/* force a restore */
9796 	crtc_state->mode_changed = true;
9797 
9798 	/* Attach plane to drm_atomic_state */
9799 	plane_state = drm_atomic_get_plane_state(state, plane);
9800 
9801 	ret = PTR_ERR_OR_ZERO(plane_state);
9802 	if (ret)
9803 		goto out;
9804 
9805 	/* Call commit internally with the state we just constructed */
9806 	ret = drm_atomic_commit(state);
9807 
9808 out:
9809 	drm_atomic_state_put(state);
9810 	if (ret)
9811 		DRM_ERROR("Restoring old state failed with %i\n", ret);
9812 
9813 	return ret;
9814 }
9815 
9816 /*
9817  * This function handles all cases when set mode does not come upon hotplug.
9818  * This includes when a display is unplugged then plugged back into the
9819  * same port and when running without usermode desktop manager supprot
9820  */
9821 void dm_restore_drm_connector_state(struct drm_device *dev,
9822 				    struct drm_connector *connector)
9823 {
9824 	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
9825 	struct amdgpu_crtc *disconnected_acrtc;
9826 	struct dm_crtc_state *acrtc_state;
9827 
9828 	if (!aconnector->dc_sink || !connector->state || !connector->encoder)
9829 		return;
9830 
9831 	disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
9832 	if (!disconnected_acrtc)
9833 		return;
9834 
9835 	acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
9836 	if (!acrtc_state->stream)
9837 		return;
9838 
9839 	/*
9840 	 * If the previous sink is not released and different from the current,
9841 	 * we deduce we are in a state where we can not rely on usermode call
9842 	 * to turn on the display, so we do it here
9843 	 */
9844 	if (acrtc_state->stream->sink != aconnector->dc_sink)
9845 		dm_force_atomic_commit(&aconnector->base);
9846 }
9847 
9848 /*
9849  * Grabs all modesetting locks to serialize against any blocking commits,
9850  * Waits for completion of all non blocking commits.
9851  */
9852 static int do_aquire_global_lock(struct drm_device *dev,
9853 				 struct drm_atomic_state *state)
9854 {
9855 	struct drm_crtc *crtc;
9856 	struct drm_crtc_commit *commit;
9857 	long ret;
9858 
9859 	/*
9860 	 * Adding all modeset locks to aquire_ctx will
9861 	 * ensure that when the framework release it the
9862 	 * extra locks we are locking here will get released to
9863 	 */
9864 	ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
9865 	if (ret)
9866 		return ret;
9867 
9868 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
9869 		spin_lock(&crtc->commit_lock);
9870 		commit = list_first_entry_or_null(&crtc->commit_list,
9871 				struct drm_crtc_commit, commit_entry);
9872 		if (commit)
9873 			drm_crtc_commit_get(commit);
9874 		spin_unlock(&crtc->commit_lock);
9875 
9876 		if (!commit)
9877 			continue;
9878 
9879 		/*
9880 		 * Make sure all pending HW programming completed and
9881 		 * page flips done
9882 		 */
9883 		ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
9884 
9885 		if (ret > 0)
9886 			ret = wait_for_completion_interruptible_timeout(
9887 					&commit->flip_done, 10*HZ);
9888 
9889 		if (ret == 0)
9890 			DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done "
9891 				  "timed out\n", crtc->base.id, crtc->name);
9892 
9893 		drm_crtc_commit_put(commit);
9894 	}
9895 
9896 	return ret < 0 ? ret : 0;
9897 }
9898 
9899 static void get_freesync_config_for_crtc(
9900 	struct dm_crtc_state *new_crtc_state,
9901 	struct dm_connector_state *new_con_state)
9902 {
9903 	struct mod_freesync_config config = {0};
9904 	struct amdgpu_dm_connector *aconnector =
9905 			to_amdgpu_dm_connector(new_con_state->base.connector);
9906 	struct drm_display_mode *mode = &new_crtc_state->base.mode;
9907 	int vrefresh = drm_mode_vrefresh(mode);
9908 	bool fs_vid_mode = false;
9909 
9910 	new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
9911 					vrefresh >= aconnector->min_vfreq &&
9912 					vrefresh <= aconnector->max_vfreq;
9913 
9914 	if (new_crtc_state->vrr_supported) {
9915 		new_crtc_state->stream->ignore_msa_timing_param = true;
9916 		fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
9917 
9918 		config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
9919 		config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
9920 		config.vsif_supported = true;
9921 		config.btr = true;
9922 
9923 		if (fs_vid_mode) {
9924 			config.state = VRR_STATE_ACTIVE_FIXED;
9925 			config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
9926 			goto out;
9927 		} else if (new_crtc_state->base.vrr_enabled) {
9928 			config.state = VRR_STATE_ACTIVE_VARIABLE;
9929 		} else {
9930 			config.state = VRR_STATE_INACTIVE;
9931 		}
9932 	}
9933 out:
9934 	new_crtc_state->freesync_config = config;
9935 }
9936 
9937 static void reset_freesync_config_for_crtc(
9938 	struct dm_crtc_state *new_crtc_state)
9939 {
9940 	new_crtc_state->vrr_supported = false;
9941 
9942 	memset(&new_crtc_state->vrr_infopacket, 0,
9943 	       sizeof(new_crtc_state->vrr_infopacket));
9944 }
9945 
9946 static bool
9947 is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
9948 				 struct drm_crtc_state *new_crtc_state)
9949 {
9950 	struct drm_display_mode old_mode, new_mode;
9951 
9952 	if (!old_crtc_state || !new_crtc_state)
9953 		return false;
9954 
9955 	old_mode = old_crtc_state->mode;
9956 	new_mode = new_crtc_state->mode;
9957 
9958 	if (old_mode.clock       == new_mode.clock &&
9959 	    old_mode.hdisplay    == new_mode.hdisplay &&
9960 	    old_mode.vdisplay    == new_mode.vdisplay &&
9961 	    old_mode.htotal      == new_mode.htotal &&
9962 	    old_mode.vtotal      != new_mode.vtotal &&
9963 	    old_mode.hsync_start == new_mode.hsync_start &&
9964 	    old_mode.vsync_start != new_mode.vsync_start &&
9965 	    old_mode.hsync_end   == new_mode.hsync_end &&
9966 	    old_mode.vsync_end   != new_mode.vsync_end &&
9967 	    old_mode.hskew       == new_mode.hskew &&
9968 	    old_mode.vscan       == new_mode.vscan &&
9969 	    (old_mode.vsync_end - old_mode.vsync_start) ==
9970 	    (new_mode.vsync_end - new_mode.vsync_start))
9971 		return true;
9972 
9973 	return false;
9974 }
9975 
9976 static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) {
9977 	uint64_t num, den, res;
9978 	struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base;
9979 
9980 	dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED;
9981 
9982 	num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000;
9983 	den = (unsigned long long)new_crtc_state->mode.htotal *
9984 	      (unsigned long long)new_crtc_state->mode.vtotal;
9985 
9986 	res = div_u64(num, den);
9987 	dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res;
9988 }
9989 
9990 static int dm_update_crtc_state(struct amdgpu_display_manager *dm,
9991 				struct drm_atomic_state *state,
9992 				struct drm_crtc *crtc,
9993 				struct drm_crtc_state *old_crtc_state,
9994 				struct drm_crtc_state *new_crtc_state,
9995 				bool enable,
9996 				bool *lock_and_validation_needed)
9997 {
9998 	struct dm_atomic_state *dm_state = NULL;
9999 	struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
10000 	struct dc_stream_state *new_stream;
10001 	int ret = 0;
10002 
10003 	/*
10004 	 * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set
10005 	 * update changed items
10006 	 */
10007 	struct amdgpu_crtc *acrtc = NULL;
10008 	struct amdgpu_dm_connector *aconnector = NULL;
10009 	struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL;
10010 	struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL;
10011 
10012 	new_stream = NULL;
10013 
10014 	dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10015 	dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10016 	acrtc = to_amdgpu_crtc(crtc);
10017 	aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc);
10018 
10019 	/* TODO This hack should go away */
10020 	if (aconnector && enable) {
10021 		/* Make sure fake sink is created in plug-in scenario */
10022 		drm_new_conn_state = drm_atomic_get_new_connector_state(state,
10023 							    &aconnector->base);
10024 		drm_old_conn_state = drm_atomic_get_old_connector_state(state,
10025 							    &aconnector->base);
10026 
10027 		if (IS_ERR(drm_new_conn_state)) {
10028 			ret = PTR_ERR_OR_ZERO(drm_new_conn_state);
10029 			goto fail;
10030 		}
10031 
10032 		dm_new_conn_state = to_dm_connector_state(drm_new_conn_state);
10033 		dm_old_conn_state = to_dm_connector_state(drm_old_conn_state);
10034 
10035 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10036 			goto skip_modeset;
10037 
10038 		new_stream = create_validate_stream_for_sink(aconnector,
10039 							     &new_crtc_state->mode,
10040 							     dm_new_conn_state,
10041 							     dm_old_crtc_state->stream);
10042 
10043 		/*
10044 		 * we can have no stream on ACTION_SET if a display
10045 		 * was disconnected during S3, in this case it is not an
10046 		 * error, the OS will be updated after detection, and
10047 		 * will do the right thing on next atomic commit
10048 		 */
10049 
10050 		if (!new_stream) {
10051 			DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
10052 					__func__, acrtc->base.base.id);
10053 			ret = -ENOMEM;
10054 			goto fail;
10055 		}
10056 
10057 		/*
10058 		 * TODO: Check VSDB bits to decide whether this should
10059 		 * be enabled or not.
10060 		 */
10061 		new_stream->triggered_crtc_reset.enabled =
10062 			dm->force_timing_sync;
10063 
10064 		dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10065 
10066 		ret = fill_hdr_info_packet(drm_new_conn_state,
10067 					   &new_stream->hdr_static_metadata);
10068 		if (ret)
10069 			goto fail;
10070 
10071 		/*
10072 		 * If we already removed the old stream from the context
10073 		 * (and set the new stream to NULL) then we can't reuse
10074 		 * the old stream even if the stream and scaling are unchanged.
10075 		 * We'll hit the BUG_ON and black screen.
10076 		 *
10077 		 * TODO: Refactor this function to allow this check to work
10078 		 * in all conditions.
10079 		 */
10080 		if (amdgpu_freesync_vid_mode &&
10081 		    dm_new_crtc_state->stream &&
10082 		    is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state))
10083 			goto skip_modeset;
10084 
10085 		if (dm_new_crtc_state->stream &&
10086 		    dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
10087 		    dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
10088 			new_crtc_state->mode_changed = false;
10089 			DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d",
10090 					 new_crtc_state->mode_changed);
10091 		}
10092 	}
10093 
10094 	/* mode_changed flag may get updated above, need to check again */
10095 	if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
10096 		goto skip_modeset;
10097 
10098 	DRM_DEBUG_ATOMIC(
10099 		"amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, "
10100 		"planes_changed:%d, mode_changed:%d,active_changed:%d,"
10101 		"connectors_changed:%d\n",
10102 		acrtc->crtc_id,
10103 		new_crtc_state->enable,
10104 		new_crtc_state->active,
10105 		new_crtc_state->planes_changed,
10106 		new_crtc_state->mode_changed,
10107 		new_crtc_state->active_changed,
10108 		new_crtc_state->connectors_changed);
10109 
10110 	/* Remove stream for any changed/disabled CRTC */
10111 	if (!enable) {
10112 
10113 		if (!dm_old_crtc_state->stream)
10114 			goto skip_modeset;
10115 
10116 		if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream &&
10117 		    is_timing_unchanged_for_freesync(new_crtc_state,
10118 						     old_crtc_state)) {
10119 			new_crtc_state->mode_changed = false;
10120 			DRM_DEBUG_DRIVER(
10121 				"Mode change not required for front porch change, "
10122 				"setting mode_changed to %d",
10123 				new_crtc_state->mode_changed);
10124 
10125 			set_freesync_fixed_config(dm_new_crtc_state);
10126 
10127 			goto skip_modeset;
10128 		} else if (amdgpu_freesync_vid_mode && aconnector &&
10129 			   is_freesync_video_mode(&new_crtc_state->mode,
10130 						  aconnector)) {
10131 			struct drm_display_mode *high_mode;
10132 
10133 			high_mode = get_highest_refresh_rate_mode(aconnector, false);
10134 			if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) {
10135 				set_freesync_fixed_config(dm_new_crtc_state);
10136 			}
10137 		}
10138 
10139 		ret = dm_atomic_get_state(state, &dm_state);
10140 		if (ret)
10141 			goto fail;
10142 
10143 		DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n",
10144 				crtc->base.id);
10145 
10146 		/* i.e. reset mode */
10147 		if (dc_remove_stream_from_ctx(
10148 				dm->dc,
10149 				dm_state->context,
10150 				dm_old_crtc_state->stream) != DC_OK) {
10151 			ret = -EINVAL;
10152 			goto fail;
10153 		}
10154 
10155 		dc_stream_release(dm_old_crtc_state->stream);
10156 		dm_new_crtc_state->stream = NULL;
10157 
10158 		reset_freesync_config_for_crtc(dm_new_crtc_state);
10159 
10160 		*lock_and_validation_needed = true;
10161 
10162 	} else {/* Add stream for any updated/enabled CRTC */
10163 		/*
10164 		 * Quick fix to prevent NULL pointer on new_stream when
10165 		 * added MST connectors not found in existing crtc_state in the chained mode
10166 		 * TODO: need to dig out the root cause of that
10167 		 */
10168 		if (!aconnector || (!aconnector->dc_sink && aconnector->mst_port))
10169 			goto skip_modeset;
10170 
10171 		if (modereset_required(new_crtc_state))
10172 			goto skip_modeset;
10173 
10174 		if (modeset_required(new_crtc_state, new_stream,
10175 				     dm_old_crtc_state->stream)) {
10176 
10177 			WARN_ON(dm_new_crtc_state->stream);
10178 
10179 			ret = dm_atomic_get_state(state, &dm_state);
10180 			if (ret)
10181 				goto fail;
10182 
10183 			dm_new_crtc_state->stream = new_stream;
10184 
10185 			dc_stream_retain(new_stream);
10186 
10187 			DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
10188 					 crtc->base.id);
10189 
10190 			if (dc_add_stream_to_ctx(
10191 					dm->dc,
10192 					dm_state->context,
10193 					dm_new_crtc_state->stream) != DC_OK) {
10194 				ret = -EINVAL;
10195 				goto fail;
10196 			}
10197 
10198 			*lock_and_validation_needed = true;
10199 		}
10200 	}
10201 
10202 skip_modeset:
10203 	/* Release extra reference */
10204 	if (new_stream)
10205 		 dc_stream_release(new_stream);
10206 
10207 	/*
10208 	 * We want to do dc stream updates that do not require a
10209 	 * full modeset below.
10210 	 */
10211 	if (!(enable && aconnector && new_crtc_state->active))
10212 		return 0;
10213 	/*
10214 	 * Given above conditions, the dc state cannot be NULL because:
10215 	 * 1. We're in the process of enabling CRTCs (just been added
10216 	 *    to the dc context, or already is on the context)
10217 	 * 2. Has a valid connector attached, and
10218 	 * 3. Is currently active and enabled.
10219 	 * => The dc stream state currently exists.
10220 	 */
10221 	BUG_ON(dm_new_crtc_state->stream == NULL);
10222 
10223 	/* Scaling or underscan settings */
10224 	if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
10225 				drm_atomic_crtc_needs_modeset(new_crtc_state))
10226 		update_stream_scaling_settings(
10227 			&new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
10228 
10229 	/* ABM settings */
10230 	dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
10231 
10232 	/*
10233 	 * Color management settings. We also update color properties
10234 	 * when a modeset is needed, to ensure it gets reprogrammed.
10235 	 */
10236 	if (dm_new_crtc_state->base.color_mgmt_changed ||
10237 	    drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10238 		ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
10239 		if (ret)
10240 			goto fail;
10241 	}
10242 
10243 	/* Update Freesync settings. */
10244 	get_freesync_config_for_crtc(dm_new_crtc_state,
10245 				     dm_new_conn_state);
10246 
10247 	return ret;
10248 
10249 fail:
10250 	if (new_stream)
10251 		dc_stream_release(new_stream);
10252 	return ret;
10253 }
10254 
10255 static bool should_reset_plane(struct drm_atomic_state *state,
10256 			       struct drm_plane *plane,
10257 			       struct drm_plane_state *old_plane_state,
10258 			       struct drm_plane_state *new_plane_state)
10259 {
10260 	struct drm_plane *other;
10261 	struct drm_plane_state *old_other_state, *new_other_state;
10262 	struct drm_crtc_state *new_crtc_state;
10263 	int i;
10264 
10265 	/*
10266 	 * TODO: Remove this hack once the checks below are sufficient
10267 	 * enough to determine when we need to reset all the planes on
10268 	 * the stream.
10269 	 */
10270 	if (state->allow_modeset)
10271 		return true;
10272 
10273 	/* Exit early if we know that we're adding or removing the plane. */
10274 	if (old_plane_state->crtc != new_plane_state->crtc)
10275 		return true;
10276 
10277 	/* old crtc == new_crtc == NULL, plane not in context. */
10278 	if (!new_plane_state->crtc)
10279 		return false;
10280 
10281 	new_crtc_state =
10282 		drm_atomic_get_new_crtc_state(state, new_plane_state->crtc);
10283 
10284 	if (!new_crtc_state)
10285 		return true;
10286 
10287 	/* CRTC Degamma changes currently require us to recreate planes. */
10288 	if (new_crtc_state->color_mgmt_changed)
10289 		return true;
10290 
10291 	if (drm_atomic_crtc_needs_modeset(new_crtc_state))
10292 		return true;
10293 
10294 	/*
10295 	 * If there are any new primary or overlay planes being added or
10296 	 * removed then the z-order can potentially change. To ensure
10297 	 * correct z-order and pipe acquisition the current DC architecture
10298 	 * requires us to remove and recreate all existing planes.
10299 	 *
10300 	 * TODO: Come up with a more elegant solution for this.
10301 	 */
10302 	for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) {
10303 		struct amdgpu_framebuffer *old_afb, *new_afb;
10304 		if (other->type == DRM_PLANE_TYPE_CURSOR)
10305 			continue;
10306 
10307 		if (old_other_state->crtc != new_plane_state->crtc &&
10308 		    new_other_state->crtc != new_plane_state->crtc)
10309 			continue;
10310 
10311 		if (old_other_state->crtc != new_other_state->crtc)
10312 			return true;
10313 
10314 		/* Src/dst size and scaling updates. */
10315 		if (old_other_state->src_w != new_other_state->src_w ||
10316 		    old_other_state->src_h != new_other_state->src_h ||
10317 		    old_other_state->crtc_w != new_other_state->crtc_w ||
10318 		    old_other_state->crtc_h != new_other_state->crtc_h)
10319 			return true;
10320 
10321 		/* Rotation / mirroring updates. */
10322 		if (old_other_state->rotation != new_other_state->rotation)
10323 			return true;
10324 
10325 		/* Blending updates. */
10326 		if (old_other_state->pixel_blend_mode !=
10327 		    new_other_state->pixel_blend_mode)
10328 			return true;
10329 
10330 		/* Alpha updates. */
10331 		if (old_other_state->alpha != new_other_state->alpha)
10332 			return true;
10333 
10334 		/* Colorspace changes. */
10335 		if (old_other_state->color_range != new_other_state->color_range ||
10336 		    old_other_state->color_encoding != new_other_state->color_encoding)
10337 			return true;
10338 
10339 		/* Framebuffer checks fall at the end. */
10340 		if (!old_other_state->fb || !new_other_state->fb)
10341 			continue;
10342 
10343 		/* Pixel format changes can require bandwidth updates. */
10344 		if (old_other_state->fb->format != new_other_state->fb->format)
10345 			return true;
10346 
10347 		old_afb = (struct amdgpu_framebuffer *)old_other_state->fb;
10348 		new_afb = (struct amdgpu_framebuffer *)new_other_state->fb;
10349 
10350 		/* Tiling and DCC changes also require bandwidth updates. */
10351 		if (old_afb->tiling_flags != new_afb->tiling_flags ||
10352 		    old_afb->base.modifier != new_afb->base.modifier)
10353 			return true;
10354 	}
10355 
10356 	return false;
10357 }
10358 
10359 static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc,
10360 			      struct drm_plane_state *new_plane_state,
10361 			      struct drm_framebuffer *fb)
10362 {
10363 	struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev);
10364 	struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb);
10365 	unsigned int pitch;
10366 	bool linear;
10367 
10368 	if (fb->width > new_acrtc->max_cursor_width ||
10369 	    fb->height > new_acrtc->max_cursor_height) {
10370 		DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n",
10371 				 new_plane_state->fb->width,
10372 				 new_plane_state->fb->height);
10373 		return -EINVAL;
10374 	}
10375 	if (new_plane_state->src_w != fb->width << 16 ||
10376 	    new_plane_state->src_h != fb->height << 16) {
10377 		DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10378 		return -EINVAL;
10379 	}
10380 
10381 	/* Pitch in pixels */
10382 	pitch = fb->pitches[0] / fb->format->cpp[0];
10383 
10384 	if (fb->width != pitch) {
10385 		DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d",
10386 				 fb->width, pitch);
10387 		return -EINVAL;
10388 	}
10389 
10390 	switch (pitch) {
10391 	case 64:
10392 	case 128:
10393 	case 256:
10394 		/* FB pitch is supported by cursor plane */
10395 		break;
10396 	default:
10397 		DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch);
10398 		return -EINVAL;
10399 	}
10400 
10401 	/* Core DRM takes care of checking FB modifiers, so we only need to
10402 	 * check tiling flags when the FB doesn't have a modifier. */
10403 	if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) {
10404 		if (adev->family < AMDGPU_FAMILY_AI) {
10405 			linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 &&
10406 			         AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 &&
10407 				 AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0;
10408 		} else {
10409 			linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0;
10410 		}
10411 		if (!linear) {
10412 			DRM_DEBUG_ATOMIC("Cursor FB not linear");
10413 			return -EINVAL;
10414 		}
10415 	}
10416 
10417 	return 0;
10418 }
10419 
10420 static int dm_update_plane_state(struct dc *dc,
10421 				 struct drm_atomic_state *state,
10422 				 struct drm_plane *plane,
10423 				 struct drm_plane_state *old_plane_state,
10424 				 struct drm_plane_state *new_plane_state,
10425 				 bool enable,
10426 				 bool *lock_and_validation_needed)
10427 {
10428 
10429 	struct dm_atomic_state *dm_state = NULL;
10430 	struct drm_crtc *new_plane_crtc, *old_plane_crtc;
10431 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10432 	struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state;
10433 	struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state;
10434 	struct amdgpu_crtc *new_acrtc;
10435 	bool needs_reset;
10436 	int ret = 0;
10437 
10438 
10439 	new_plane_crtc = new_plane_state->crtc;
10440 	old_plane_crtc = old_plane_state->crtc;
10441 	dm_new_plane_state = to_dm_plane_state(new_plane_state);
10442 	dm_old_plane_state = to_dm_plane_state(old_plane_state);
10443 
10444 	if (plane->type == DRM_PLANE_TYPE_CURSOR) {
10445 		if (!enable || !new_plane_crtc ||
10446 			drm_atomic_plane_disabling(plane->state, new_plane_state))
10447 			return 0;
10448 
10449 		new_acrtc = to_amdgpu_crtc(new_plane_crtc);
10450 
10451 		if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) {
10452 			DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n");
10453 			return -EINVAL;
10454 		}
10455 
10456 		if (new_plane_state->fb) {
10457 			ret = dm_check_cursor_fb(new_acrtc, new_plane_state,
10458 						 new_plane_state->fb);
10459 			if (ret)
10460 				return ret;
10461 		}
10462 
10463 		return 0;
10464 	}
10465 
10466 	needs_reset = should_reset_plane(state, plane, old_plane_state,
10467 					 new_plane_state);
10468 
10469 	/* Remove any changed/removed planes */
10470 	if (!enable) {
10471 		if (!needs_reset)
10472 			return 0;
10473 
10474 		if (!old_plane_crtc)
10475 			return 0;
10476 
10477 		old_crtc_state = drm_atomic_get_old_crtc_state(
10478 				state, old_plane_crtc);
10479 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10480 
10481 		if (!dm_old_crtc_state->stream)
10482 			return 0;
10483 
10484 		DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n",
10485 				plane->base.id, old_plane_crtc->base.id);
10486 
10487 		ret = dm_atomic_get_state(state, &dm_state);
10488 		if (ret)
10489 			return ret;
10490 
10491 		if (!dc_remove_plane_from_context(
10492 				dc,
10493 				dm_old_crtc_state->stream,
10494 				dm_old_plane_state->dc_state,
10495 				dm_state->context)) {
10496 
10497 			return -EINVAL;
10498 		}
10499 
10500 
10501 		dc_plane_state_release(dm_old_plane_state->dc_state);
10502 		dm_new_plane_state->dc_state = NULL;
10503 
10504 		*lock_and_validation_needed = true;
10505 
10506 	} else { /* Add new planes */
10507 		struct dc_plane_state *dc_new_plane_state;
10508 
10509 		if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10510 			return 0;
10511 
10512 		if (!new_plane_crtc)
10513 			return 0;
10514 
10515 		new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc);
10516 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
10517 
10518 		if (!dm_new_crtc_state->stream)
10519 			return 0;
10520 
10521 		if (!needs_reset)
10522 			return 0;
10523 
10524 		ret = dm_plane_helper_check_state(new_plane_state, new_crtc_state);
10525 		if (ret)
10526 			return ret;
10527 
10528 		WARN_ON(dm_new_plane_state->dc_state);
10529 
10530 		dc_new_plane_state = dc_create_plane_state(dc);
10531 		if (!dc_new_plane_state)
10532 			return -ENOMEM;
10533 
10534 		DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n",
10535 				 plane->base.id, new_plane_crtc->base.id);
10536 
10537 		ret = fill_dc_plane_attributes(
10538 			drm_to_adev(new_plane_crtc->dev),
10539 			dc_new_plane_state,
10540 			new_plane_state,
10541 			new_crtc_state);
10542 		if (ret) {
10543 			dc_plane_state_release(dc_new_plane_state);
10544 			return ret;
10545 		}
10546 
10547 		ret = dm_atomic_get_state(state, &dm_state);
10548 		if (ret) {
10549 			dc_plane_state_release(dc_new_plane_state);
10550 			return ret;
10551 		}
10552 
10553 		/*
10554 		 * Any atomic check errors that occur after this will
10555 		 * not need a release. The plane state will be attached
10556 		 * to the stream, and therefore part of the atomic
10557 		 * state. It'll be released when the atomic state is
10558 		 * cleaned.
10559 		 */
10560 		if (!dc_add_plane_to_context(
10561 				dc,
10562 				dm_new_crtc_state->stream,
10563 				dc_new_plane_state,
10564 				dm_state->context)) {
10565 
10566 			dc_plane_state_release(dc_new_plane_state);
10567 			return -EINVAL;
10568 		}
10569 
10570 		dm_new_plane_state->dc_state = dc_new_plane_state;
10571 
10572 		/* Tell DC to do a full surface update every time there
10573 		 * is a plane change. Inefficient, but works for now.
10574 		 */
10575 		dm_new_plane_state->dc_state->update_flags.bits.full_update = 1;
10576 
10577 		*lock_and_validation_needed = true;
10578 	}
10579 
10580 
10581 	return ret;
10582 }
10583 
10584 static int dm_check_crtc_cursor(struct drm_atomic_state *state,
10585 				struct drm_crtc *crtc,
10586 				struct drm_crtc_state *new_crtc_state)
10587 {
10588 	struct drm_plane_state *new_cursor_state, *new_primary_state;
10589 	int cursor_scale_w, cursor_scale_h, primary_scale_w, primary_scale_h;
10590 
10591 	/* On DCE and DCN there is no dedicated hardware cursor plane. We get a
10592 	 * cursor per pipe but it's going to inherit the scaling and
10593 	 * positioning from the underlying pipe. Check the cursor plane's
10594 	 * blending properties match the primary plane's. */
10595 
10596 	new_cursor_state = drm_atomic_get_new_plane_state(state, crtc->cursor);
10597 	new_primary_state = drm_atomic_get_new_plane_state(state, crtc->primary);
10598 	if (!new_cursor_state || !new_primary_state ||
10599 	    !new_cursor_state->fb || !new_primary_state->fb) {
10600 		return 0;
10601 	}
10602 
10603 	cursor_scale_w = new_cursor_state->crtc_w * 1000 /
10604 			 (new_cursor_state->src_w >> 16);
10605 	cursor_scale_h = new_cursor_state->crtc_h * 1000 /
10606 			 (new_cursor_state->src_h >> 16);
10607 
10608 	primary_scale_w = new_primary_state->crtc_w * 1000 /
10609 			 (new_primary_state->src_w >> 16);
10610 	primary_scale_h = new_primary_state->crtc_h * 1000 /
10611 			 (new_primary_state->src_h >> 16);
10612 
10613 	if (cursor_scale_w != primary_scale_w ||
10614 	    cursor_scale_h != primary_scale_h) {
10615 		drm_dbg_atomic(crtc->dev, "Cursor plane scaling doesn't match primary plane\n");
10616 		return -EINVAL;
10617 	}
10618 
10619 	return 0;
10620 }
10621 
10622 #if defined(CONFIG_DRM_AMD_DC_DCN)
10623 static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
10624 {
10625 	struct drm_connector *connector;
10626 	struct drm_connector_state *conn_state, *old_conn_state;
10627 	struct amdgpu_dm_connector *aconnector = NULL;
10628 	int i;
10629 	for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) {
10630 		if (!conn_state->crtc)
10631 			conn_state = old_conn_state;
10632 
10633 		if (conn_state->crtc != crtc)
10634 			continue;
10635 
10636 		aconnector = to_amdgpu_dm_connector(connector);
10637 		if (!aconnector->port || !aconnector->mst_port)
10638 			aconnector = NULL;
10639 		else
10640 			break;
10641 	}
10642 
10643 	if (!aconnector)
10644 		return 0;
10645 
10646 	return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
10647 }
10648 #endif
10649 
10650 static int validate_overlay(struct drm_atomic_state *state)
10651 {
10652 	int i;
10653 	struct drm_plane *plane;
10654 	struct drm_plane_state *new_plane_state;
10655 	struct drm_plane_state *primary_state, *overlay_state = NULL;
10656 
10657 	/* Check if primary plane is contained inside overlay */
10658 	for_each_new_plane_in_state_reverse(state, plane, new_plane_state, i) {
10659 		if (plane->type == DRM_PLANE_TYPE_OVERLAY) {
10660 			if (drm_atomic_plane_disabling(plane->state, new_plane_state))
10661 				return 0;
10662 
10663 			overlay_state = new_plane_state;
10664 			continue;
10665 		}
10666 	}
10667 
10668 	/* check if we're making changes to the overlay plane */
10669 	if (!overlay_state)
10670 		return 0;
10671 
10672 	/* check if overlay plane is enabled */
10673 	if (!overlay_state->crtc)
10674 		return 0;
10675 
10676 	/* find the primary plane for the CRTC that the overlay is enabled on */
10677 	primary_state = drm_atomic_get_plane_state(state, overlay_state->crtc->primary);
10678 	if (IS_ERR(primary_state))
10679 		return PTR_ERR(primary_state);
10680 
10681 	/* check if primary plane is enabled */
10682 	if (!primary_state->crtc)
10683 		return 0;
10684 
10685 	/* Perform the bounds check to ensure the overlay plane covers the primary */
10686 	if (primary_state->crtc_x < overlay_state->crtc_x ||
10687 	    primary_state->crtc_y < overlay_state->crtc_y ||
10688 	    primary_state->crtc_x + primary_state->crtc_w > overlay_state->crtc_x + overlay_state->crtc_w ||
10689 	    primary_state->crtc_y + primary_state->crtc_h > overlay_state->crtc_y + overlay_state->crtc_h) {
10690 		DRM_DEBUG_ATOMIC("Overlay plane is enabled with hardware cursor but does not fully cover primary plane\n");
10691 		return -EINVAL;
10692 	}
10693 
10694 	return 0;
10695 }
10696 
10697 /**
10698  * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
10699  * @dev: The DRM device
10700  * @state: The atomic state to commit
10701  *
10702  * Validate that the given atomic state is programmable by DC into hardware.
10703  * This involves constructing a &struct dc_state reflecting the new hardware
10704  * state we wish to commit, then querying DC to see if it is programmable. It's
10705  * important not to modify the existing DC state. Otherwise, atomic_check
10706  * may unexpectedly commit hardware changes.
10707  *
10708  * When validating the DC state, it's important that the right locks are
10709  * acquired. For full updates case which removes/adds/updates streams on one
10710  * CRTC while flipping on another CRTC, acquiring global lock will guarantee
10711  * that any such full update commit will wait for completion of any outstanding
10712  * flip using DRMs synchronization events.
10713  *
10714  * Note that DM adds the affected connectors for all CRTCs in state, when that
10715  * might not seem necessary. This is because DC stream creation requires the
10716  * DC sink, which is tied to the DRM connector state. Cleaning this up should
10717  * be possible but non-trivial - a possible TODO item.
10718  *
10719  * Return: -Error code if validation failed.
10720  */
10721 static int amdgpu_dm_atomic_check(struct drm_device *dev,
10722 				  struct drm_atomic_state *state)
10723 {
10724 	struct amdgpu_device *adev = drm_to_adev(dev);
10725 	struct dm_atomic_state *dm_state = NULL;
10726 	struct dc *dc = adev->dm.dc;
10727 	struct drm_connector *connector;
10728 	struct drm_connector_state *old_con_state, *new_con_state;
10729 	struct drm_crtc *crtc;
10730 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
10731 	struct drm_plane *plane;
10732 	struct drm_plane_state *old_plane_state, *new_plane_state;
10733 	enum dc_status status;
10734 	int ret, i;
10735 	bool lock_and_validation_needed = false;
10736 	struct dm_crtc_state *dm_old_crtc_state;
10737 #if defined(CONFIG_DRM_AMD_DC_DCN)
10738 	struct dsc_mst_fairness_vars vars[MAX_PIPES];
10739 #endif
10740 
10741 	trace_amdgpu_dm_atomic_check_begin(state);
10742 
10743 	ret = drm_atomic_helper_check_modeset(dev, state);
10744 	if (ret)
10745 		goto fail;
10746 
10747 	/* Check connector changes */
10748 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10749 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10750 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10751 
10752 		/* Skip connectors that are disabled or part of modeset already. */
10753 		if (!old_con_state->crtc && !new_con_state->crtc)
10754 			continue;
10755 
10756 		if (!new_con_state->crtc)
10757 			continue;
10758 
10759 		new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
10760 		if (IS_ERR(new_crtc_state)) {
10761 			ret = PTR_ERR(new_crtc_state);
10762 			goto fail;
10763 		}
10764 
10765 		if (dm_old_con_state->abm_level !=
10766 		    dm_new_con_state->abm_level)
10767 			new_crtc_state->connectors_changed = true;
10768 	}
10769 
10770 #if defined(CONFIG_DRM_AMD_DC_DCN)
10771 	if (dc_resource_is_dsc_encoding_supported(dc)) {
10772 		for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10773 			if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
10774 				ret = add_affected_mst_dsc_crtcs(state, crtc);
10775 				if (ret)
10776 					goto fail;
10777 			}
10778 		}
10779 	}
10780 #endif
10781 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10782 		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
10783 
10784 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
10785 		    !new_crtc_state->color_mgmt_changed &&
10786 		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled &&
10787 			dm_old_crtc_state->dsc_force_changed == false)
10788 			continue;
10789 
10790 		ret = amdgpu_dm_verify_lut_sizes(new_crtc_state);
10791 		if (ret)
10792 			goto fail;
10793 
10794 		if (!new_crtc_state->enable)
10795 			continue;
10796 
10797 		ret = drm_atomic_add_affected_connectors(state, crtc);
10798 		if (ret)
10799 			return ret;
10800 
10801 		ret = drm_atomic_add_affected_planes(state, crtc);
10802 		if (ret)
10803 			goto fail;
10804 
10805 		if (dm_old_crtc_state->dsc_force_changed)
10806 			new_crtc_state->mode_changed = true;
10807 	}
10808 
10809 	/*
10810 	 * Add all primary and overlay planes on the CRTC to the state
10811 	 * whenever a plane is enabled to maintain correct z-ordering
10812 	 * and to enable fast surface updates.
10813 	 */
10814 	drm_for_each_crtc(crtc, dev) {
10815 		bool modified = false;
10816 
10817 		for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
10818 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10819 				continue;
10820 
10821 			if (new_plane_state->crtc == crtc ||
10822 			    old_plane_state->crtc == crtc) {
10823 				modified = true;
10824 				break;
10825 			}
10826 		}
10827 
10828 		if (!modified)
10829 			continue;
10830 
10831 		drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) {
10832 			if (plane->type == DRM_PLANE_TYPE_CURSOR)
10833 				continue;
10834 
10835 			new_plane_state =
10836 				drm_atomic_get_plane_state(state, plane);
10837 
10838 			if (IS_ERR(new_plane_state)) {
10839 				ret = PTR_ERR(new_plane_state);
10840 				goto fail;
10841 			}
10842 		}
10843 	}
10844 
10845 	/* Remove exiting planes if they are modified */
10846 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10847 		ret = dm_update_plane_state(dc, state, plane,
10848 					    old_plane_state,
10849 					    new_plane_state,
10850 					    false,
10851 					    &lock_and_validation_needed);
10852 		if (ret)
10853 			goto fail;
10854 	}
10855 
10856 	/* Disable all crtcs which require disable */
10857 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10858 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10859 					   old_crtc_state,
10860 					   new_crtc_state,
10861 					   false,
10862 					   &lock_and_validation_needed);
10863 		if (ret)
10864 			goto fail;
10865 	}
10866 
10867 	/* Enable all crtcs which require enable */
10868 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
10869 		ret = dm_update_crtc_state(&adev->dm, state, crtc,
10870 					   old_crtc_state,
10871 					   new_crtc_state,
10872 					   true,
10873 					   &lock_and_validation_needed);
10874 		if (ret)
10875 			goto fail;
10876 	}
10877 
10878 	ret = validate_overlay(state);
10879 	if (ret)
10880 		goto fail;
10881 
10882 	/* Add new/modified planes */
10883 	for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) {
10884 		ret = dm_update_plane_state(dc, state, plane,
10885 					    old_plane_state,
10886 					    new_plane_state,
10887 					    true,
10888 					    &lock_and_validation_needed);
10889 		if (ret)
10890 			goto fail;
10891 	}
10892 
10893 	/* Run this here since we want to validate the streams we created */
10894 	ret = drm_atomic_helper_check_planes(dev, state);
10895 	if (ret)
10896 		goto fail;
10897 
10898 	/* Check cursor planes scaling */
10899 	for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
10900 		ret = dm_check_crtc_cursor(state, crtc, new_crtc_state);
10901 		if (ret)
10902 			goto fail;
10903 	}
10904 
10905 	if (state->legacy_cursor_update) {
10906 		/*
10907 		 * This is a fast cursor update coming from the plane update
10908 		 * helper, check if it can be done asynchronously for better
10909 		 * performance.
10910 		 */
10911 		state->async_update =
10912 			!drm_atomic_helper_async_check(dev, state);
10913 
10914 		/*
10915 		 * Skip the remaining global validation if this is an async
10916 		 * update. Cursor updates can be done without affecting
10917 		 * state or bandwidth calcs and this avoids the performance
10918 		 * penalty of locking the private state object and
10919 		 * allocating a new dc_state.
10920 		 */
10921 		if (state->async_update)
10922 			return 0;
10923 	}
10924 
10925 	/* Check scaling and underscan changes*/
10926 	/* TODO Removed scaling changes validation due to inability to commit
10927 	 * new stream into context w\o causing full reset. Need to
10928 	 * decide how to handle.
10929 	 */
10930 	for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
10931 		struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
10932 		struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
10933 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
10934 
10935 		/* Skip any modesets/resets */
10936 		if (!acrtc || drm_atomic_crtc_needs_modeset(
10937 				drm_atomic_get_new_crtc_state(state, &acrtc->base)))
10938 			continue;
10939 
10940 		/* Skip any thing not scale or underscan changes */
10941 		if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state))
10942 			continue;
10943 
10944 		lock_and_validation_needed = true;
10945 	}
10946 
10947 	/**
10948 	 * Streams and planes are reset when there are changes that affect
10949 	 * bandwidth. Anything that affects bandwidth needs to go through
10950 	 * DC global validation to ensure that the configuration can be applied
10951 	 * to hardware.
10952 	 *
10953 	 * We have to currently stall out here in atomic_check for outstanding
10954 	 * commits to finish in this case because our IRQ handlers reference
10955 	 * DRM state directly - we can end up disabling interrupts too early
10956 	 * if we don't.
10957 	 *
10958 	 * TODO: Remove this stall and drop DM state private objects.
10959 	 */
10960 	if (lock_and_validation_needed) {
10961 		ret = dm_atomic_get_state(state, &dm_state);
10962 		if (ret)
10963 			goto fail;
10964 
10965 		ret = do_aquire_global_lock(dev, state);
10966 		if (ret)
10967 			goto fail;
10968 
10969 #if defined(CONFIG_DRM_AMD_DC_DCN)
10970 		if (!compute_mst_dsc_configs_for_state(state, dm_state->context, vars))
10971 			goto fail;
10972 
10973 		ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
10974 		if (ret)
10975 			goto fail;
10976 #endif
10977 
10978 		/*
10979 		 * Perform validation of MST topology in the state:
10980 		 * We need to perform MST atomic check before calling
10981 		 * dc_validate_global_state(), or there is a chance
10982 		 * to get stuck in an infinite loop and hang eventually.
10983 		 */
10984 		ret = drm_dp_mst_atomic_check(state);
10985 		if (ret)
10986 			goto fail;
10987 		status = dc_validate_global_state(dc, dm_state->context, false);
10988 		if (status != DC_OK) {
10989 			drm_dbg_atomic(dev,
10990 				       "DC global validation failure: %s (%d)",
10991 				       dc_status_to_str(status), status);
10992 			ret = -EINVAL;
10993 			goto fail;
10994 		}
10995 	} else {
10996 		/*
10997 		 * The commit is a fast update. Fast updates shouldn't change
10998 		 * the DC context, affect global validation, and can have their
10999 		 * commit work done in parallel with other commits not touching
11000 		 * the same resource. If we have a new DC context as part of
11001 		 * the DM atomic state from validation we need to free it and
11002 		 * retain the existing one instead.
11003 		 *
11004 		 * Furthermore, since the DM atomic state only contains the DC
11005 		 * context and can safely be annulled, we can free the state
11006 		 * and clear the associated private object now to free
11007 		 * some memory and avoid a possible use-after-free later.
11008 		 */
11009 
11010 		for (i = 0; i < state->num_private_objs; i++) {
11011 			struct drm_private_obj *obj = state->private_objs[i].ptr;
11012 
11013 			if (obj->funcs == adev->dm.atomic_obj.funcs) {
11014 				int j = state->num_private_objs-1;
11015 
11016 				dm_atomic_destroy_state(obj,
11017 						state->private_objs[i].state);
11018 
11019 				/* If i is not at the end of the array then the
11020 				 * last element needs to be moved to where i was
11021 				 * before the array can safely be truncated.
11022 				 */
11023 				if (i != j)
11024 					state->private_objs[i] =
11025 						state->private_objs[j];
11026 
11027 				state->private_objs[j].ptr = NULL;
11028 				state->private_objs[j].state = NULL;
11029 				state->private_objs[j].old_state = NULL;
11030 				state->private_objs[j].new_state = NULL;
11031 
11032 				state->num_private_objs = j;
11033 				break;
11034 			}
11035 		}
11036 	}
11037 
11038 	/* Store the overall update type for use later in atomic check. */
11039 	for_each_new_crtc_in_state (state, crtc, new_crtc_state, i) {
11040 		struct dm_crtc_state *dm_new_crtc_state =
11041 			to_dm_crtc_state(new_crtc_state);
11042 
11043 		dm_new_crtc_state->update_type = lock_and_validation_needed ?
11044 							 UPDATE_TYPE_FULL :
11045 							 UPDATE_TYPE_FAST;
11046 	}
11047 
11048 	/* Must be success */
11049 	WARN_ON(ret);
11050 
11051 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11052 
11053 	return ret;
11054 
11055 fail:
11056 	if (ret == -EDEADLK)
11057 		DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n");
11058 	else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS)
11059 		DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n");
11060 	else
11061 		DRM_DEBUG_DRIVER("Atomic check failed with err: %d \n", ret);
11062 
11063 	trace_amdgpu_dm_atomic_check_finish(state, ret);
11064 
11065 	return ret;
11066 }
11067 
11068 static bool is_dp_capable_without_timing_msa(struct dc *dc,
11069 					     struct amdgpu_dm_connector *amdgpu_dm_connector)
11070 {
11071 	uint8_t dpcd_data;
11072 	bool capable = false;
11073 
11074 	if (amdgpu_dm_connector->dc_link &&
11075 		dm_helpers_dp_read_dpcd(
11076 				NULL,
11077 				amdgpu_dm_connector->dc_link,
11078 				DP_DOWN_STREAM_PORT_COUNT,
11079 				&dpcd_data,
11080 				sizeof(dpcd_data))) {
11081 		capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true:false;
11082 	}
11083 
11084 	return capable;
11085 }
11086 
11087 static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm,
11088 		unsigned int offset,
11089 		unsigned int total_length,
11090 		uint8_t *data,
11091 		unsigned int length,
11092 		struct amdgpu_hdmi_vsdb_info *vsdb)
11093 {
11094 	bool res;
11095 	union dmub_rb_cmd cmd;
11096 	struct dmub_cmd_send_edid_cea *input;
11097 	struct dmub_cmd_edid_cea_output *output;
11098 
11099 	if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES)
11100 		return false;
11101 
11102 	memset(&cmd, 0, sizeof(cmd));
11103 
11104 	input = &cmd.edid_cea.data.input;
11105 
11106 	cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA;
11107 	cmd.edid_cea.header.sub_type = 0;
11108 	cmd.edid_cea.header.payload_bytes =
11109 		sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header);
11110 	input->offset = offset;
11111 	input->length = length;
11112 	input->total_length = total_length;
11113 	memcpy(input->payload, data, length);
11114 
11115 	res = dc_dmub_srv_cmd_with_reply_data(dm->dc->ctx->dmub_srv, &cmd);
11116 	if (!res) {
11117 		DRM_ERROR("EDID CEA parser failed\n");
11118 		return false;
11119 	}
11120 
11121 	output = &cmd.edid_cea.data.output;
11122 
11123 	if (output->type == DMUB_CMD__EDID_CEA_ACK) {
11124 		if (!output->ack.success) {
11125 			DRM_ERROR("EDID CEA ack failed at offset %d\n",
11126 					output->ack.offset);
11127 		}
11128 	} else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) {
11129 		if (!output->amd_vsdb.vsdb_found)
11130 			return false;
11131 
11132 		vsdb->freesync_supported = output->amd_vsdb.freesync_supported;
11133 		vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version;
11134 		vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate;
11135 		vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate;
11136 	} else {
11137 		if (output->type != 0)
11138 			DRM_WARN("Unknown EDID CEA parser results\n");
11139 		return false;
11140 	}
11141 
11142 	return true;
11143 }
11144 
11145 static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm,
11146 		uint8_t *edid_ext, int len,
11147 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11148 {
11149 	int i;
11150 
11151 	/* send extension block to DMCU for parsing */
11152 	for (i = 0; i < len; i += 8) {
11153 		bool res;
11154 		int offset;
11155 
11156 		/* send 8 bytes a time */
11157 		if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8))
11158 			return false;
11159 
11160 		if (i+8 == len) {
11161 			/* EDID block sent completed, expect result */
11162 			int version, min_rate, max_rate;
11163 
11164 			res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate);
11165 			if (res) {
11166 				/* amd vsdb found */
11167 				vsdb_info->freesync_supported = 1;
11168 				vsdb_info->amd_vsdb_version = version;
11169 				vsdb_info->min_refresh_rate_hz = min_rate;
11170 				vsdb_info->max_refresh_rate_hz = max_rate;
11171 				return true;
11172 			}
11173 			/* not amd vsdb */
11174 			return false;
11175 		}
11176 
11177 		/* check for ack*/
11178 		res = dc_edid_parser_recv_cea_ack(dm->dc, &offset);
11179 		if (!res)
11180 			return false;
11181 	}
11182 
11183 	return false;
11184 }
11185 
11186 static bool parse_edid_cea_dmub(struct amdgpu_display_manager *dm,
11187 		uint8_t *edid_ext, int len,
11188 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11189 {
11190 	int i;
11191 
11192 	/* send extension block to DMCU for parsing */
11193 	for (i = 0; i < len; i += 8) {
11194 		/* send 8 bytes a time */
11195 		if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info))
11196 			return false;
11197 	}
11198 
11199 	return vsdb_info->freesync_supported;
11200 }
11201 
11202 static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector,
11203 		uint8_t *edid_ext, int len,
11204 		struct amdgpu_hdmi_vsdb_info *vsdb_info)
11205 {
11206 	struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev);
11207 
11208 	if (adev->dm.dmub_srv)
11209 		return parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info);
11210 	else
11211 		return parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info);
11212 }
11213 
11214 static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector,
11215 		struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info)
11216 {
11217 	uint8_t *edid_ext = NULL;
11218 	int i;
11219 	bool valid_vsdb_found = false;
11220 
11221 	/*----- drm_find_cea_extension() -----*/
11222 	/* No EDID or EDID extensions */
11223 	if (edid == NULL || edid->extensions == 0)
11224 		return -ENODEV;
11225 
11226 	/* Find CEA extension */
11227 	for (i = 0; i < edid->extensions; i++) {
11228 		edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1);
11229 		if (edid_ext[0] == CEA_EXT)
11230 			break;
11231 	}
11232 
11233 	if (i == edid->extensions)
11234 		return -ENODEV;
11235 
11236 	/*----- cea_db_offsets() -----*/
11237 	if (edid_ext[0] != CEA_EXT)
11238 		return -ENODEV;
11239 
11240 	valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info);
11241 
11242 	return valid_vsdb_found ? i : -ENODEV;
11243 }
11244 
11245 void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
11246 					struct edid *edid)
11247 {
11248 	int i = 0;
11249 	struct detailed_timing *timing;
11250 	struct detailed_non_pixel *data;
11251 	struct detailed_data_monitor_range *range;
11252 	struct amdgpu_dm_connector *amdgpu_dm_connector =
11253 			to_amdgpu_dm_connector(connector);
11254 	struct dm_connector_state *dm_con_state = NULL;
11255 
11256 	struct drm_device *dev = connector->dev;
11257 	struct amdgpu_device *adev = drm_to_adev(dev);
11258 	bool freesync_capable = false;
11259 	struct amdgpu_hdmi_vsdb_info vsdb_info = {0};
11260 
11261 	if (!connector->state) {
11262 		DRM_ERROR("%s - Connector has no state", __func__);
11263 		goto update;
11264 	}
11265 
11266 	if (!edid) {
11267 		dm_con_state = to_dm_connector_state(connector->state);
11268 
11269 		amdgpu_dm_connector->min_vfreq = 0;
11270 		amdgpu_dm_connector->max_vfreq = 0;
11271 		amdgpu_dm_connector->pixel_clock_mhz = 0;
11272 
11273 		goto update;
11274 	}
11275 
11276 	dm_con_state = to_dm_connector_state(connector->state);
11277 
11278 	if (!amdgpu_dm_connector->dc_sink) {
11279 		DRM_ERROR("dc_sink NULL, could not add free_sync module.\n");
11280 		goto update;
11281 	}
11282 	if (!adev->dm.freesync_module)
11283 		goto update;
11284 
11285 
11286 	if (amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
11287 		|| amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_EDP) {
11288 		bool edid_check_required = false;
11289 
11290 		if (edid) {
11291 			edid_check_required = is_dp_capable_without_timing_msa(
11292 						adev->dm.dc,
11293 						amdgpu_dm_connector);
11294 		}
11295 
11296 		if (edid_check_required == true && (edid->version > 1 ||
11297 		   (edid->version == 1 && edid->revision > 1))) {
11298 			for (i = 0; i < 4; i++) {
11299 
11300 				timing	= &edid->detailed_timings[i];
11301 				data	= &timing->data.other_data;
11302 				range	= &data->data.range;
11303 				/*
11304 				 * Check if monitor has continuous frequency mode
11305 				 */
11306 				if (data->type != EDID_DETAIL_MONITOR_RANGE)
11307 					continue;
11308 				/*
11309 				 * Check for flag range limits only. If flag == 1 then
11310 				 * no additional timing information provided.
11311 				 * Default GTF, GTF Secondary curve and CVT are not
11312 				 * supported
11313 				 */
11314 				if (range->flags != 1)
11315 					continue;
11316 
11317 				amdgpu_dm_connector->min_vfreq = range->min_vfreq;
11318 				amdgpu_dm_connector->max_vfreq = range->max_vfreq;
11319 				amdgpu_dm_connector->pixel_clock_mhz =
11320 					range->pixel_clock_mhz * 10;
11321 
11322 				connector->display_info.monitor_range.min_vfreq = range->min_vfreq;
11323 				connector->display_info.monitor_range.max_vfreq = range->max_vfreq;
11324 
11325 				break;
11326 			}
11327 
11328 			if (amdgpu_dm_connector->max_vfreq -
11329 			    amdgpu_dm_connector->min_vfreq > 10) {
11330 
11331 				freesync_capable = true;
11332 			}
11333 		}
11334 	} else if (edid && amdgpu_dm_connector->dc_sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) {
11335 		i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info);
11336 		if (i >= 0 && vsdb_info.freesync_supported) {
11337 			timing  = &edid->detailed_timings[i];
11338 			data    = &timing->data.other_data;
11339 
11340 			amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz;
11341 			amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz;
11342 			if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10)
11343 				freesync_capable = true;
11344 
11345 			connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz;
11346 			connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz;
11347 		}
11348 	}
11349 
11350 update:
11351 	if (dm_con_state)
11352 		dm_con_state->freesync_capable = freesync_capable;
11353 
11354 	if (connector->vrr_capable_property)
11355 		drm_connector_set_vrr_capable_property(connector,
11356 						       freesync_capable);
11357 }
11358 
11359 void amdgpu_dm_trigger_timing_sync(struct drm_device *dev)
11360 {
11361 	struct amdgpu_device *adev = drm_to_adev(dev);
11362 	struct dc *dc = adev->dm.dc;
11363 	int i;
11364 
11365 	mutex_lock(&adev->dm.dc_lock);
11366 	if (dc->current_state) {
11367 		for (i = 0; i < dc->current_state->stream_count; ++i)
11368 			dc->current_state->streams[i]
11369 				->triggered_crtc_reset.enabled =
11370 				adev->dm.force_timing_sync;
11371 
11372 		dm_enable_per_frame_crtc_master_sync(dc->current_state);
11373 		dc_trigger_sync(dc, dc->current_state);
11374 	}
11375 	mutex_unlock(&adev->dm.dc_lock);
11376 }
11377 
11378 void dm_write_reg_func(const struct dc_context *ctx, uint32_t address,
11379 		       uint32_t value, const char *func_name)
11380 {
11381 #ifdef DM_CHECK_ADDR_0
11382 	if (address == 0) {
11383 		DC_ERR("invalid register write. address = 0");
11384 		return;
11385 	}
11386 #endif
11387 	cgs_write_register(ctx->cgs_device, address, value);
11388 	trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value);
11389 }
11390 
11391 uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address,
11392 			  const char *func_name)
11393 {
11394 	uint32_t value;
11395 #ifdef DM_CHECK_ADDR_0
11396 	if (address == 0) {
11397 		DC_ERR("invalid register read; address = 0\n");
11398 		return 0;
11399 	}
11400 #endif
11401 
11402 	if (ctx->dmub_srv &&
11403 	    ctx->dmub_srv->reg_helper_offload.gather_in_progress &&
11404 	    !ctx->dmub_srv->reg_helper_offload.should_burst_write) {
11405 		ASSERT(false);
11406 		return 0;
11407 	}
11408 
11409 	value = cgs_read_register(ctx->cgs_device, address);
11410 
11411 	trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value);
11412 
11413 	return value;
11414 }
11415 
11416 int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int linkIndex,
11417 				struct aux_payload *payload, enum aux_return_code_type *operation_result)
11418 {
11419 	struct amdgpu_device *adev = ctx->driver_context;
11420 	int ret = 0;
11421 
11422 	dc_process_dmub_aux_transfer_async(ctx->dc, linkIndex, payload);
11423 	ret = wait_for_completion_interruptible_timeout(&adev->dm.dmub_aux_transfer_done, 10*HZ);
11424 	if (ret == 0) {
11425 		*operation_result = AUX_RET_ERROR_TIMEOUT;
11426 		return -1;
11427 	}
11428 	*operation_result = (enum aux_return_code_type)adev->dm.dmub_notify->result;
11429 
11430 	if (adev->dm.dmub_notify->result == AUX_RET_SUCCESS) {
11431 		(*payload->reply) = adev->dm.dmub_notify->aux_reply.command;
11432 
11433 		// For read case, Copy data to payload
11434 		if (!payload->write && adev->dm.dmub_notify->aux_reply.length &&
11435 		(*payload->reply == AUX_TRANSACTION_REPLY_AUX_ACK))
11436 			memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data,
11437 			adev->dm.dmub_notify->aux_reply.length);
11438 	}
11439 
11440 	return adev->dm.dmub_notify->aux_reply.length;
11441 }
11442