xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c (revision 5d8649409859dc47f54bff09182b32faed774ffa)
1 /*	$NetBSD: amdgpu_dm_helpers.c,v 1.3 2021/12/19 12:01:30 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2015 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: AMD
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: amdgpu_dm_helpers.c,v 1.3 2021/12/19 12:01:30 riastradh Exp $");
30 
31 #include <linux/string.h>
32 #include <linux/acpi.h>
33 #include <linux/version.h>
34 #include <linux/i2c.h>
35 
36 #include <drm/drm_probe_helper.h>
37 #include <drm/amdgpu_drm.h>
38 #include <drm/drm_edid.h>
39 
40 #include "dm_services.h"
41 #include "amdgpu.h"
42 #include "dc.h"
43 #include "amdgpu_dm.h"
44 #include "amdgpu_dm_irq.h"
45 #include "amdgpu_dm_mst_types.h"
46 
47 #include "dm_helpers.h"
48 
49 /* dm_helpers_parse_edid_caps
50  *
51  * Parse edid caps
52  *
53  * @edid:	[in] pointer to edid
54  * @edid_caps:	[out] pointer to edid caps to be filled in
55  * @return
56  *	enum dc_edid_status
57  * */
58 enum dc_edid_status dm_helpers_parse_edid_caps(
59 		struct dc_context *ctx,
60 		const struct dc_edid *edid,
61 		struct dc_edid_caps *edid_caps)
62 {
63 	struct edid *edid_buf = (struct edid *) __UNCONST(edid->raw_edid);
64 	struct cea_sad *sads;
65 	int sad_count = -1;
66 	int sadb_count = -1;
67 	int i = 0;
68 	int j = 0;
69 	uint8_t *sadb = NULL;
70 
71 	enum dc_edid_status result = EDID_OK;
72 
73 	if (!edid_caps || !edid)
74 		return EDID_BAD_INPUT;
75 
76 	if (!drm_edid_is_valid(edid_buf))
77 		result = EDID_BAD_CHECKSUM;
78 
79 	edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
80 					((uint16_t) edid_buf->mfg_id[1])<<8;
81 	edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
82 					((uint16_t) edid_buf->prod_code[1])<<8;
83 	edid_caps->serial_number = edid_buf->serial;
84 	edid_caps->manufacture_week = edid_buf->mfg_week;
85 	edid_caps->manufacture_year = edid_buf->mfg_year;
86 
87 	/* One of the four detailed_timings stores the monitor name. It's
88 	 * stored in an array of length 13. */
89 	for (i = 0; i < 4; i++) {
90 		if (edid_buf->detailed_timings[i].data.other_data.type == 0xfc) {
91 			while (j < 13 && edid_buf->detailed_timings[i].data.other_data.data.str.str[j]) {
92 				if (edid_buf->detailed_timings[i].data.other_data.data.str.str[j] == '\n')
93 					break;
94 
95 				edid_caps->display_name[j] =
96 					edid_buf->detailed_timings[i].data.other_data.data.str.str[j];
97 				j++;
98 			}
99 		}
100 	}
101 
102 	edid_caps->edid_hdmi = drm_detect_hdmi_monitor(
103 			(struct edid *) __UNCONST(edid->raw_edid));
104 
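	/* Pull the CTA-861 Short Audio Descriptors (SADs) out of the EDID and
	 * copy them into DC's audio mode table, capped at
	 * DC_MAX_AUDIO_DESC_COUNT entries. */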
105 	sad_count = drm_edid_to_sad((struct edid *) __UNCONST(edid->raw_edid), &sads);
106 	if (sad_count <= 0)
107 		return result;
108 
109 	edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT;
110 	for (i = 0; i < edid_caps->audio_mode_count; ++i) {
111 		struct cea_sad *sad = &sads[i];
112 
113 		edid_caps->audio_modes[i].format_code = sad->format;
114 		edid_caps->audio_modes[i].channel_count = sad->channels + 1;
115 		edid_caps->audio_modes[i].sample_rate = sad->freq;
116 		edid_caps->audio_modes[i].sample_size = sad->byte2;
117 	}
118 
119 	sadb_count = drm_edid_to_speaker_allocation((struct edid *) __UNCONST(edid->raw_edid), &sadb);
120 
121 	if (sadb_count < 0) {
122 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
123 		sadb_count = 0;
124 	}
125 
126 	if (sadb_count)
127 		edid_caps->speaker_flags = sadb[0];
128 	else
129 		edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
130 
131 	kfree(sads);
132 	kfree(sadb);
133 
134 	return result;
135 }
136 
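/*
 * Copy the active DRM MST payload allocations (VCPI and slot count) from the
 * topology manager into DC's proposed stream allocation table, holding
 * payload_lock so the table cannot change underneath us.
 */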
137 static void get_payload_table(
138 		struct amdgpu_dm_connector *aconnector,
139 		struct dp_mst_stream_allocation_table *proposed_table)
140 {
141 	int i;
142 	struct drm_dp_mst_topology_mgr *mst_mgr =
143 			&aconnector->mst_port->mst_mgr;
144 
145 	mutex_lock(&mst_mgr->payload_lock);
146 
147 	proposed_table->stream_count = 0;
148 
149 	/* number of active streams */
150 	for (i = 0; i < mst_mgr->max_payloads; i++) {
151 		if (mst_mgr->payloads[i].num_slots == 0)
152 			break; /* end of vcp_id table */
153 
154 		ASSERT(mst_mgr->payloads[i].payload_state !=
155 				DP_PAYLOAD_DELETE_LOCAL);
156 
157 		if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL ||
158 			mst_mgr->payloads[i].payload_state ==
159 					DP_PAYLOAD_REMOTE) {
160 
161 			struct dp_mst_stream_allocation *sa =
162 					&proposed_table->stream_allocations[
163 						proposed_table->stream_count];
164 
165 			sa->slot_count = mst_mgr->payloads[i].num_slots;
166 			sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi;
167 			proposed_table->stream_count++;
168 		}
169 	}
170 
171 	mutex_unlock(&mst_mgr->payload_lock);
172 }
173 
174 void dm_helpers_dp_update_branch_info(
175 	struct dc_context *ctx,
176 	const struct dc_link *link)
177 {}
178 
179 /*
180  * Writes the payload allocation table to the immediate downstream device.
181  */
182 bool dm_helpers_dp_mst_write_payload_allocation_table(
183 		struct dc_context *ctx,
184 		const struct dc_stream_state *stream,
185 		struct dp_mst_stream_allocation_table *proposed_table,
186 		bool enable)
187 {
188 	struct amdgpu_dm_connector *aconnector;
189 	struct dm_connector_state *dm_conn_state;
190 	struct drm_dp_mst_topology_mgr *mst_mgr;
191 	struct drm_dp_mst_port *mst_port;
192 	bool ret;
193 
194 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
195 	/* Accessing the connector state is required for vcpi_slots allocation
196 	 * and relies directly on behaviour in the commit check that blocks
197 	 * before commit, guaranteeing that the state is not swapped while
198 	 * still in use in commit tail. */
199 
200 	if (!aconnector || !aconnector->mst_port)
201 		return false;
202 
203 	dm_conn_state = to_dm_connector_state(aconnector->base.state);
204 
205 	mst_mgr = &aconnector->mst_port->mst_mgr;
206 
207 	if (!mst_mgr->mst_state)
208 		return false;
209 
210 	mst_port = aconnector->port;
211 
212 	if (enable) {
213 
214 		ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port,
215 					       dm_conn_state->pbn,
216 					       dm_conn_state->vcpi_slots);
217 		if (!ret)
218 			return false;
219 
220 	} else {
221 		drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port);
222 	}
223 
224 	/* It's OK for this to fail */
225 	drm_dp_update_payload_part1(mst_mgr);
226 
227 	/* mst_mgr->payloads is the VC payload table used to notify the MST
228 	 * branch via DPCD or sideband AUX messages. Slots 1-63 are allocated
229 	 * sequentially for each stream, and the AMD ASIC stream slot allocation
230 	 * must follow the same sequence, so copy the DRM MST allocation to dc. */
231 
232 	get_payload_table(aconnector, proposed_table);
233 
234 	return true;
235 }
236 
237 /*
238  * Poll for a pending down reply.
239  */
240 void dm_helpers_dp_mst_poll_pending_down_reply(
241 	struct dc_context *ctx,
242 	const struct dc_link *link)
243 {}
244 
245 /*
246  * Clear the payload allocation table before enabling the MST DP link.
247  */
248 void dm_helpers_dp_mst_clear_payload_allocation_table(
249 	struct dc_context *ctx,
250 	const struct dc_link *link)
251 {}
252 
253 /*
254  * Polls for ACT (allocation change trigger) handled and sends
255  * ALLOCATE_PAYLOAD message.
256  */
257 enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger(
258 		struct dc_context *ctx,
259 		const struct dc_stream_state *stream)
260 {
261 	struct amdgpu_dm_connector *aconnector;
262 	struct drm_dp_mst_topology_mgr *mst_mgr;
263 	int ret;
264 
265 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
266 
267 	if (!aconnector || !aconnector->mst_port)
268 		return ACT_FAILED;
269 
270 	mst_mgr = &aconnector->mst_port->mst_mgr;
271 
272 	if (!mst_mgr->mst_state)
273 		return ACT_FAILED;
274 
275 	ret = drm_dp_check_act_status(mst_mgr);
276 
277 	if (ret)
278 		return ACT_FAILED;
279 
280 	return ACT_SUCCESS;
281 }
282 
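/*
 * Step 2 of the MST payload update: send the ALLOCATE_PAYLOAD sideband
 * messages to the branch device and, when disabling the stream, release its
 * VCPI allocation.
 */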
283 bool dm_helpers_dp_mst_send_payload_allocation(
284 		struct dc_context *ctx,
285 		const struct dc_stream_state *stream,
286 		bool enable)
287 {
288 	struct amdgpu_dm_connector *aconnector;
289 	struct drm_dp_mst_topology_mgr *mst_mgr;
290 	struct drm_dp_mst_port *mst_port;
291 
292 	aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
293 
294 	if (!aconnector || !aconnector->mst_port)
295 		return false;
296 
297 	mst_port = aconnector->port;
298 
299 	mst_mgr = &aconnector->mst_port->mst_mgr;
300 
301 	if (!mst_mgr->mst_state)
302 		return false;
303 
304 	/* It's OK for this to fail */
305 	drm_dp_update_payload_part2(mst_mgr);
306 
307 	if (!enable)
308 		drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port);
309 
310 	return true;
311 }
312 
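/*
 * Open a DTN log section by emitting the "[dtn begin]" marker into the log
 * buffer, or to dmesg when no log context is provided.
 */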
313 void dm_dtn_log_begin(struct dc_context *ctx,
314 	struct dc_log_buffer_ctx *log_ctx)
315 {
316 	static const char msg[] = "[dtn begin]\n";
317 
318 	if (!log_ctx) {
319 		pr_info("%s", msg);
320 		return;
321 	}
322 
323 	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
324 }
325 
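/*
 * Append a printf-style message to the DTN log buffer, growing the buffer as
 * needed; without a log context the message is redirected to dmesg.
 */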
326 void dm_dtn_log_append_v(struct dc_context *ctx,
327 	struct dc_log_buffer_ctx *log_ctx,
328 	const char *msg, ...)
329 {
330 	va_list args;
331 	size_t total;
332 	int n;
333 
334 	if (!log_ctx) {
335 		/* No context, redirect to dmesg. */
336 		struct va_format vaf;
337 
338 		vaf.fmt = msg;
339 		vaf.va = &args;
340 
341 		va_start(args, msg);
342 		pr_info("%pV", &vaf);
343 		va_end(args);
344 
345 		return;
346 	}
347 
348 	/* Measure the output. */
349 	va_start(args, msg);
350 	n = vsnprintf(NULL, 0, msg, args);
351 	va_end(args);
352 
353 	if (n <= 0)
354 		return;
355 
356 	/* Reallocate the string buffer as needed. */
357 	total = log_ctx->pos + n + 1;
358 
359 	if (total > log_ctx->size) {
360 		char *buf = (char *)kvcalloc(total, sizeof(char), GFP_KERNEL);
361 
362 		if (buf) {
363 			memcpy(buf, log_ctx->buf, log_ctx->pos);
364 			kfree(log_ctx->buf);
365 
366 			log_ctx->buf = buf;
367 			log_ctx->size = total;
368 		}
369 	}
370 
371 	if (!log_ctx->buf)
372 		return;
373 
374 	/* Write the formatted string to the log buffer. */
375 	va_start(args, msg);
376 	n = vscnprintf(
377 		log_ctx->buf + log_ctx->pos,
378 		log_ctx->size - log_ctx->pos,
379 		msg,
380 		args);
381 	va_end(args);
382 
383 	if (n > 0)
384 		log_ctx->pos += n;
385 }
386 
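/*
 * Close a DTN log section with the "[dtn end]" marker, mirroring
 * dm_dtn_log_begin().
 */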
387 void dm_dtn_log_end(struct dc_context *ctx,
388 	struct dc_log_buffer_ctx *log_ctx)
389 {
390 	static const char msg[] = "[dtn end]\n";
391 
392 	if (!log_ctx) {
393 		pr_info("%s", msg);
394 		return;
395 	}
396 
397 	dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
398 }
399 
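/*
 * Enable MST on the connector's topology manager. During boot the start is
 * deferred and the call just reports success.
 */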
400 bool dm_helpers_dp_mst_start_top_mgr(
401 		struct dc_context *ctx,
402 		const struct dc_link *link,
403 		bool boot)
404 {
405 	struct amdgpu_dm_connector *aconnector = link->priv;
406 
407 	if (!aconnector) {
408 			DRM_ERROR("Failed to find connector for link!");
409 			return false;
410 	}
411 
412 	if (boot) {
413 		DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
414 					aconnector, aconnector->base.base.id);
415 		return true;
416 	}
417 
418 	DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
419 			aconnector, aconnector->base.base.id);
420 
421 	return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0);
422 }
423 
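/*
 * Disable MST on the connector's topology manager if it is currently
 * enabled.
 */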
424 void dm_helpers_dp_mst_stop_top_mgr(
425 		struct dc_context *ctx,
426 		const struct dc_link *link)
427 {
428 	struct amdgpu_dm_connector *aconnector = link->priv;
429 
430 	if (!aconnector) {
431 			DRM_ERROR("Failed to find connector for link!");
432 			return;
433 	}
434 
435 	DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
436 			aconnector, aconnector->base.base.id);
437 
438 	if (aconnector->mst_mgr.mst_state == true)
439 		drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
440 }
441 
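/*
 * Read from the sink's DPCD address space over the connector's AUX channel;
 * returns true only if the read transferred data.
 */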
442 bool dm_helpers_dp_read_dpcd(
443 		struct dc_context *ctx,
444 		const struct dc_link *link,
445 		uint32_t address,
446 		uint8_t *data,
447 		uint32_t size)
448 {
449 
450 	struct amdgpu_dm_connector *aconnector = link->priv;
451 
452 	if (!aconnector) {
453 		DRM_ERROR("Failed to find connector for link!");
454 		return false;
455 	}
456 
457 	return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address,
458 			data, size) > 0;
459 }
460 
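/*
 * Write to the sink's DPCD address space over the connector's AUX channel;
 * returns true only if the write transferred data.
 */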
461 bool dm_helpers_dp_write_dpcd(
462 		struct dc_context *ctx,
463 		const struct dc_link *link,
464 		uint32_t address,
465 		const uint8_t *data,
466 		uint32_t size)
467 {
468 	struct amdgpu_dm_connector *aconnector = link->priv;
469 
470 	if (!aconnector) {
471 		DRM_ERROR("Failed to find connector for link!");
472 		return false;
473 	}
474 
475 	return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
476 			address, __UNCONST(data), size) > 0;
477 }
478 
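/*
 * Translate a DC i2c_command into an array of struct i2c_msg and submit it
 * on the connector's I2C adapter as a single combined transfer.
 */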
479 bool dm_helpers_submit_i2c(
480 		struct dc_context *ctx,
481 		const struct dc_link *link,
482 		struct i2c_command *cmd)
483 {
484 	struct amdgpu_dm_connector *aconnector = link->priv;
485 	struct i2c_msg *msgs;
486 	int i = 0;
487 	int num = cmd->number_of_payloads;
488 	bool result;
489 
490 	if (!aconnector) {
491 		DRM_ERROR("Failed to find connector for link!");
492 		return false;
493 	}
494 
495 	msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
496 
497 	if (!msgs)
498 		return false;
499 
500 	for (i = 0; i < num; i++) {
501 		msgs[i].flags = cmd->payloads[i].write ? 0 : I2C_M_RD;
502 		msgs[i].addr = cmd->payloads[i].address;
503 		msgs[i].len = cmd->payloads[i].length;
504 		msgs[i].buf = cmd->payloads[i].data;
505 	}
506 
507 	result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num;
508 
509 	kfree(msgs);
510 
511 	return result;
512 }
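
/*
 * Enable or disable DSC at the sink by writing DP_DSC_ENABLE: through the
 * connector's dsc_aux for MST streams, or via the link's DPCD helper for
 * plain DisplayPort.
 */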
513 bool dm_helpers_dp_write_dsc_enable(
514 		struct dc_context *ctx,
515 		const struct dc_stream_state *stream,
516 		bool enable
517 )
518 {
519 	uint8_t enable_dsc = enable ? 1 : 0;
520 	struct amdgpu_dm_connector *aconnector;
521 
522 	if (!stream)
523 		return false;
524 
525 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
526 		aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
527 
528 		if (!aconnector->dsc_aux)
529 			return false;
530 
531 		return (drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1) >= 0);
532 	}
533 
534 	if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT)
535 		return dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
536 
537 	return false;
538 }
539 
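/*
 * Check whether a DP sink is present on the link, serializing the probe with
 * other users of the AUX channel via its hw_mutex.
 */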
540 bool dm_helpers_is_dp_sink_present(struct dc_link *link)
541 {
542 	bool dp_sink_present;
543 	struct amdgpu_dm_connector *aconnector = link->priv;
544 
545 	if (!aconnector) {
546 		BUG_ON("Failed to find connector for link!");
547 		return true;
548 	}
549 
550 	mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex);
551 	dp_sink_present = dc_link_is_dp_sink_present(link);
552 	mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex);
553 	return dp_sink_present;
554 }
555 
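/*
 * Read the sink's EDID over AUX (DP) or I2C, copy it into the DC sink, parse
 * its capabilities, and answer any pending EDID-read test request from the
 * sink.
 */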
556 enum dc_edid_status dm_helpers_read_local_edid(
557 		struct dc_context *ctx,
558 		struct dc_link *link,
559 		struct dc_sink *sink)
560 {
561 	struct amdgpu_dm_connector *aconnector = link->priv;
562 	struct i2c_adapter *ddc;
563 	int retry = 3;
564 	enum dc_edid_status edid_status;
565 	struct edid *edid;
566 
567 	if (link->aux_mode)
568 		ddc = &aconnector->dm_dp_aux.aux.ddc;
569 	else
570 		ddc = &aconnector->i2c->base;
571 
572 	/* Some dongles read the EDID incorrectly the first time, so verify
573 	 * the checksum and retry to make sure we read a correct EDID.
574 	 */
575 	do {
576 
577 		edid = drm_get_edid(&aconnector->base, ddc);
578 
579 		if (!edid)
580 			return EDID_NO_RESPONSE;
581 
582 		sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1);
583 		memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length);
584 
585 		/* We don't need the original edid anymore */
586 		kfree(edid);
587 
588 		edid_status = dm_helpers_parse_edid_caps(
589 						ctx,
590 						&sink->dc_edid,
591 						&sink->edid_caps);
592 
593 	} while (edid_status == EDID_BAD_CHECKSUM && --retry > 0);
594 
595 	if (edid_status != EDID_OK)
596 		DRM_ERROR("EDID err: %d, on connector: %s",
597 				edid_status,
598 				aconnector->base.name);
599 	if (link->aux_mode) {
600 		union test_request test_request = { {0} };
601 		union test_response test_response = { {0} };
602 
603 		dm_helpers_dp_read_dpcd(ctx,
604 					link,
605 					DP_TEST_REQUEST,
606 					&test_request.raw,
607 					sizeof(union test_request));
608 
609 		if (!test_request.bits.EDID_READ)
610 			return edid_status;
611 
612 		test_response.bits.EDID_CHECKSUM_WRITE = 1;
613 
614 		dm_helpers_dp_write_dpcd(ctx,
615 					link,
616 					DP_TEST_EDID_CHECKSUM,
617 					&sink->dc_edid.raw_edid[sink->dc_edid.length-1],
618 					1);
619 
620 		dm_helpers_dp_write_dpcd(ctx,
621 					link,
622 					DP_TEST_RESPONSE,
623 					&test_response.raw,
624 					sizeof(test_response));
625 
626 	}
627 
628 	return edid_status;
629 }
630 
631 void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks)
632 {
633 	/* TODO: something */
634 }
635