1 /*	$NetBSD: amdgpu_dcn10_hw_sequencer.c,v 1.4 2021/12/19 11:59:31 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2016 Advanced Micro Devices, Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: AMD
25  *
26  */
27 
28 #include <sys/cdefs.h>
29 __KERNEL_RCSID(0, "$NetBSD: amdgpu_dcn10_hw_sequencer.c,v 1.4 2021/12/19 11:59:31 riastradh Exp $");
30 
31 #include <linux/delay.h>
32 #include "dm_services.h"
33 #include "basics/dc_common.h"
34 #include "core_types.h"
35 #include "resource.h"
36 #include "custom_float.h"
37 #include "dcn10_hw_sequencer.h"
38 #include "dcn10_hw_sequencer_debug.h"
39 #include "dce/dce_hwseq.h"
40 #include "abm.h"
41 #include "dmcu.h"
42 #include "dcn10_optc.h"
43 #include "dcn10_dpp.h"
44 #include "dcn10_mpc.h"
45 #include "timing_generator.h"
46 #include "opp.h"
47 #include "ipp.h"
48 #include "mpc.h"
49 #include "reg_helper.h"
50 #include "dcn10_hubp.h"
51 #include "dcn10_hubbub.h"
52 #include "dcn10_cm_common.h"
53 #include "dc_link_dp.h"
54 #include "dccg.h"
55 #include "clk_mgr.h"
56 
57 
58 #include "dsc.h"
59 
60 #define DC_LOGGER_INIT(logger)
61 
62 #define CTX \
63 	hws->ctx
64 #define REG(reg)\
65 	hws->regs->reg
66 
67 #undef FN
68 #define FN(reg_name, field_name) \
69 	hws->shifts->field_name, hws->masks->field_name
70 
71 /* printed field is 17 characters wide; the first two characters are spaces */
72 #define DTN_INFO_MICRO_SEC(ref_cycle) \
73 	print_microsec(dc_ctx, log_ctx, ref_cycle)
74 
75 #define GAMMA_HW_POINTS_NUM 256
76 
77 void print_microsec(struct dc_context *dc_ctx,
78 	struct dc_log_buffer_ctx *log_ctx,
79 	uint32_t ref_cycle)
80 {
81 	const uint32_t ref_clk_mhz = dc_ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000;
82 	static const unsigned int frac = 1000;
83 	uint32_t us_x10 = (ref_cycle * frac) / ref_clk_mhz;
84 
85 	DTN_INFO("  %11d.%03d",
86 			us_x10 / frac,
87 			us_x10 % frac);
88 }
89 
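/*
 * Take or release the lock on the timing generator of every enabled top
 * pipe, so that each TG is (un)locked exactly once.
 */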
90 static void dcn10_lock_all_pipes(struct dc *dc,
91 	struct dc_state *context,
92 	bool lock)
93 {
94 	struct pipe_ctx *pipe_ctx;
95 	struct timing_generator *tg;
96 	int i;
97 
98 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
99 		pipe_ctx = &context->res_ctx.pipe_ctx[i];
100 		tg = pipe_ctx->stream_res.tg;
101 		/*
102 		 * Only lock the top pipe's tg to prevent redundant
103 		 * (un)locking. Also skip if pipe is disabled.
104 		 */
105 		if (pipe_ctx->top_pipe ||
106 		    !pipe_ctx->stream || !pipe_ctx->plane_state ||
107 		    !tg->funcs->is_tg_enabled(tg))
108 			continue;
109 
110 		if (lock)
111 			tg->funcs->lock(tg);
112 		else
113 			tg->funcs->unlock(tg);
114 	}
115 }
116 
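/* Dump the MPC and DPP CRC result registers to the log when they exist. */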
117 static void log_mpc_crc(struct dc *dc,
118 	struct dc_log_buffer_ctx *log_ctx)
119 {
120 	struct dc_context *dc_ctx = dc->ctx;
121 	struct dce_hwseq *hws = dc->hwseq;
122 
123 	if (REG(MPC_CRC_RESULT_GB))
124 		DTN_INFO("MPC_CRC_RESULT_GB:%d MPC_CRC_RESULT_C:%d MPC_CRC_RESULT_AR:%d\n",
125 		REG_READ(MPC_CRC_RESULT_GB), REG_READ(MPC_CRC_RESULT_C), REG_READ(MPC_CRC_RESULT_AR));
126 	if (REG(DPP_TOP0_DPP_CRC_VAL_B_A))
127 		DTN_INFO("DPP_TOP0_DPP_CRC_VAL_B_A:%d DPP_TOP0_DPP_CRC_VAL_R_G:%d\n",
128 		REG_READ(DPP_TOP0_DPP_CRC_VAL_B_A), REG_READ(DPP_TOP0_DPP_CRC_VAL_R_G));
129 }
130 
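/*
 * Log the four HUBBUB watermark sets (urgent, PTE/meta urgent, self-refresh
 * enter/exit, DRAM clock change), converted from refclk cycles to
 * microseconds.
 */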
131 void dcn10_log_hubbub_state(struct dc *dc, struct dc_log_buffer_ctx *log_ctx)
132 {
133 	struct dc_context *dc_ctx = dc->ctx;
134 	struct dcn_hubbub_wm wm;
135 	int i;
136 
137 	memset(&wm, 0, sizeof(struct dcn_hubbub_wm));
138 	dc->res_pool->hubbub->funcs->wm_read_state(dc->res_pool->hubbub, &wm);
139 
140 	DTN_INFO("HUBBUB WM:      data_urgent  pte_meta_urgent"
141 			"         sr_enter          sr_exit  dram_clk_change\n");
142 
143 	for (i = 0; i < 4; i++) {
144 		struct dcn_hubbub_wm_set *s;
145 
146 		s = &wm.sets[i];
147 		DTN_INFO("WM_Set[%d]:", s->wm_set);
148 		DTN_INFO_MICRO_SEC(s->data_urgent);
149 		DTN_INFO_MICRO_SEC(s->pte_meta_urgent);
150 		DTN_INFO_MICRO_SEC(s->sr_enter);
151 		DTN_INFO_MICRO_SEC(s->sr_exit);
152 		DTN_INFO_MICRO_SEC(s->dram_clk_chanage);
153 		DTN_INFO("\n");
154 	}
155 
156 	DTN_INFO("\n");
157 }
158 
159 static void dcn10_log_hubp_states(struct dc *dc, void *log_ctx)
160 {
161 	struct dc_context *dc_ctx = dc->ctx;
162 	struct resource_pool *pool = dc->res_pool;
163 	int i;
164 
165 	DTN_INFO(
166 		"HUBP:  format  addr_hi  width  height  rot  mir  sw_mode  dcc_en  blank_en  clock_en  ttu_dis  underflow   min_ttu_vblank       qos_low_wm      qos_high_wm\n");
167 	for (i = 0; i < pool->pipe_count; i++) {
168 		struct hubp *hubp = pool->hubps[i];
169 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(hubp)->state);
170 
171 		hubp->funcs->hubp_read_state(hubp);
172 
173 		if (!s->blank_en) {
174 			DTN_INFO("[%2d]:  %5xh  %6xh  %5d  %6d  %2xh  %2xh  %6xh  %6d  %8d  %8d  %7d  %8xh",
175 					hubp->inst,
176 					s->pixel_format,
177 					s->inuse_addr_hi,
178 					s->viewport_width,
179 					s->viewport_height,
180 					s->rotation_angle,
181 					s->h_mirror_en,
182 					s->sw_mode,
183 					s->dcc_en,
184 					s->blank_en,
185 					s->clock_en,
186 					s->ttu_disable,
187 					s->underflow_status);
188 			DTN_INFO_MICRO_SEC(s->min_ttu_vblank);
189 			DTN_INFO_MICRO_SEC(s->qos_level_low_wm);
190 			DTN_INFO_MICRO_SEC(s->qos_level_high_wm);
191 			DTN_INFO("\n");
192 		}
193 	}
194 
195 	DTN_INFO("\n=========RQ========\n");
196 	DTN_INFO("HUBP:  drq_exp_m  prq_exp_m  mrq_exp_m  crq_exp_m  plane1_ba  L:chunk_s  min_chu_s  meta_ch_s"
197 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h  C:chunk_s  min_chu_s  meta_ch_s"
198 		"  min_m_c_s  dpte_gr_s  mpte_gr_s  swath_hei  pte_row_h\n");
199 	for (i = 0; i < pool->pipe_count; i++) {
200 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
201 		struct _vcs_dpi_display_rq_regs_st *rq_regs = &s->rq_regs;
202 
203 		if (!s->blank_en)
204 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
205 				pool->hubps[i]->inst, rq_regs->drq_expansion_mode, rq_regs->prq_expansion_mode, rq_regs->mrq_expansion_mode,
206 				rq_regs->crq_expansion_mode, rq_regs->plane1_base_address, rq_regs->rq_regs_l.chunk_size,
207 				rq_regs->rq_regs_l.min_chunk_size, rq_regs->rq_regs_l.meta_chunk_size,
208 				rq_regs->rq_regs_l.min_meta_chunk_size, rq_regs->rq_regs_l.dpte_group_size,
209 				rq_regs->rq_regs_l.mpte_group_size, rq_regs->rq_regs_l.swath_height,
210 				rq_regs->rq_regs_l.pte_row_height_linear, rq_regs->rq_regs_c.chunk_size, rq_regs->rq_regs_c.min_chunk_size,
211 				rq_regs->rq_regs_c.meta_chunk_size, rq_regs->rq_regs_c.min_meta_chunk_size,
212 				rq_regs->rq_regs_c.dpte_group_size, rq_regs->rq_regs_c.mpte_group_size,
213 				rq_regs->rq_regs_c.swath_height, rq_regs->rq_regs_c.pte_row_height_linear);
214 	}
215 
216 	DTN_INFO("========DLG========\n");
217 	DTN_INFO("HUBP:  rc_hbe     dlg_vbe    min_d_y_n  rc_per_ht  rc_x_a_s "
218 			"  dst_y_a_s  dst_y_pf   dst_y_vvb  dst_y_rvb  dst_y_vfl  dst_y_rfl  rf_pix_fq"
219 			"  vratio_pf  vrat_pf_c  rc_pg_vbl  rc_pg_vbc  rc_mc_vbl  rc_mc_vbc  rc_pg_fll"
220 			"  rc_pg_flc  rc_mc_fll  rc_mc_flc  pr_nom_l   pr_nom_c   rc_pg_nl   rc_pg_nc "
221 			"  mr_nom_l   mr_nom_c   rc_mc_nl   rc_mc_nc   rc_ld_pl   rc_ld_pc   rc_ld_l  "
222 			"  rc_ld_c    cha_cur0   ofst_cur1  cha_cur1   vr_af_vc0  ddrq_limt  x_rt_dlay"
223 			"  x_rp_dlay  x_rr_sfl\n");
224 	for (i = 0; i < pool->pipe_count; i++) {
225 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
226 		struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &s->dlg_attr;
227 
228 		if (!s->blank_en)
229 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
230 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh"
231 				"  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
232 				pool->hubps[i]->inst, dlg_regs->refcyc_h_blank_end, dlg_regs->dlg_vblank_end, dlg_regs->min_dst_y_next_start,
233 				dlg_regs->refcyc_per_htotal, dlg_regs->refcyc_x_after_scaler, dlg_regs->dst_y_after_scaler,
234 				dlg_regs->dst_y_prefetch, dlg_regs->dst_y_per_vm_vblank, dlg_regs->dst_y_per_row_vblank,
235 				dlg_regs->dst_y_per_vm_flip, dlg_regs->dst_y_per_row_flip, dlg_regs->ref_freq_to_pix_freq,
236 				dlg_regs->vratio_prefetch, dlg_regs->vratio_prefetch_c, dlg_regs->refcyc_per_pte_group_vblank_l,
237 				dlg_regs->refcyc_per_pte_group_vblank_c, dlg_regs->refcyc_per_meta_chunk_vblank_l,
238 				dlg_regs->refcyc_per_meta_chunk_vblank_c, dlg_regs->refcyc_per_pte_group_flip_l,
239 				dlg_regs->refcyc_per_pte_group_flip_c, dlg_regs->refcyc_per_meta_chunk_flip_l,
240 				dlg_regs->refcyc_per_meta_chunk_flip_c, dlg_regs->dst_y_per_pte_row_nom_l,
241 				dlg_regs->dst_y_per_pte_row_nom_c, dlg_regs->refcyc_per_pte_group_nom_l,
242 				dlg_regs->refcyc_per_pte_group_nom_c, dlg_regs->dst_y_per_meta_row_nom_l,
243 				dlg_regs->dst_y_per_meta_row_nom_c, dlg_regs->refcyc_per_meta_chunk_nom_l,
244 				dlg_regs->refcyc_per_meta_chunk_nom_c, dlg_regs->refcyc_per_line_delivery_pre_l,
245 				dlg_regs->refcyc_per_line_delivery_pre_c, dlg_regs->refcyc_per_line_delivery_l,
246 				dlg_regs->refcyc_per_line_delivery_c, dlg_regs->chunk_hdl_adjust_cur0, dlg_regs->dst_y_offset_cur1,
247 				dlg_regs->chunk_hdl_adjust_cur1, dlg_regs->vready_after_vcount0, dlg_regs->dst_y_delta_drq_limit,
248 				dlg_regs->xfc_reg_transfer_delay, dlg_regs->xfc_reg_precharge_delay,
249 				dlg_regs->xfc_reg_remote_surface_flip_latency);
250 	}
251 
252 	DTN_INFO("========TTU========\n");
253 	DTN_INFO("HUBP:  qos_ll_wm  qos_lh_wm  mn_ttu_vb  qos_l_flp  rc_rd_p_l  rc_rd_l    rc_rd_p_c"
254 			"  rc_rd_c    rc_rd_c0   rc_rd_pc0  rc_rd_c1   rc_rd_pc1  qos_lf_l   qos_rds_l"
255 			"  qos_lf_c   qos_rds_c  qos_lf_c0  qos_rds_c0 qos_lf_c1  qos_rds_c1\n");
256 	for (i = 0; i < pool->pipe_count; i++) {
257 		struct dcn_hubp_state *s = &(TO_DCN10_HUBP(pool->hubps[i])->state);
258 		struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &s->ttu_attr;
259 
260 		if (!s->blank_en)
261 			DTN_INFO("[%2d]:  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh  %8xh\n",
262 				pool->hubps[i]->inst, ttu_regs->qos_level_low_wm, ttu_regs->qos_level_high_wm, ttu_regs->min_ttu_vblank,
263 				ttu_regs->qos_level_flip, ttu_regs->refcyc_per_req_delivery_pre_l, ttu_regs->refcyc_per_req_delivery_l,
264 				ttu_regs->refcyc_per_req_delivery_pre_c, ttu_regs->refcyc_per_req_delivery_c, ttu_regs->refcyc_per_req_delivery_cur0,
265 				ttu_regs->refcyc_per_req_delivery_pre_cur0, ttu_regs->refcyc_per_req_delivery_cur1,
266 				ttu_regs->refcyc_per_req_delivery_pre_cur1, ttu_regs->qos_level_fixed_l, ttu_regs->qos_ramp_disable_l,
267 				ttu_regs->qos_level_fixed_c, ttu_regs->qos_ramp_disable_c, ttu_regs->qos_level_fixed_cur0,
268 				ttu_regs->qos_ramp_disable_cur0, ttu_regs->qos_level_fixed_cur1, ttu_regs->qos_ramp_disable_cur1);
269 	}
270 	DTN_INFO("\n");
271 }
272 
273 void dcn10_log_hw_state(struct dc *dc,
274 	struct dc_log_buffer_ctx *log_ctx)
275 {
276 	struct dc_context *dc_ctx = dc->ctx;
277 	struct resource_pool *pool = dc->res_pool;
278 	int i;
279 
280 	DTN_INFO_BEGIN();
281 
282 	dcn10_log_hubbub_state(dc, log_ctx);
283 
284 	dcn10_log_hubp_states(dc, log_ctx);
285 
286 	DTN_INFO("DPP:    IGAM format  IGAM mode    DGAM mode    RGAM mode"
287 			"  GAMUT mode  C11 C12   C13 C14   C21 C22   C23 C24   "
288 			"C31 C32   C33 C34\n");
289 	for (i = 0; i < pool->pipe_count; i++) {
290 		struct dpp *dpp = pool->dpps[i];
291 		struct dcn_dpp_state s = {0};
292 
293 		dpp->funcs->dpp_read_state(dpp, &s);
294 
295 		if (!s.is_enabled)
296 			continue;
297 
298 		DTN_INFO("[%2d]:  %11xh  %-11s  %-11s  %-11s"
299 				"%8x    %08xh %08xh %08xh %08xh %08xh %08xh",
300 				dpp->inst,
301 				s.igam_input_format,
302 				(s.igam_lut_mode == 0) ? "BypassFixed" :
303 					((s.igam_lut_mode == 1) ? "BypassFloat" :
304 					((s.igam_lut_mode == 2) ? "RAM" :
305 					((s.igam_lut_mode == 3) ? "RAM" :
306 								 "Unknown"))),
307 				(s.dgam_lut_mode == 0) ? "Bypass" :
308 					((s.dgam_lut_mode == 1) ? "sRGB" :
309 					((s.dgam_lut_mode == 2) ? "Ycc" :
310 					((s.dgam_lut_mode == 3) ? "RAM" :
311 					((s.dgam_lut_mode == 4) ? "RAM" :
312 								 "Unknown")))),
313 				(s.rgam_lut_mode == 0) ? "Bypass" :
314 					((s.rgam_lut_mode == 1) ? "sRGB" :
315 					((s.rgam_lut_mode == 2) ? "Ycc" :
316 					((s.rgam_lut_mode == 3) ? "RAM" :
317 					((s.rgam_lut_mode == 4) ? "RAM" :
318 								 "Unknown")))),
319 				s.gamut_remap_mode,
320 				s.gamut_remap_c11_c12,
321 				s.gamut_remap_c13_c14,
322 				s.gamut_remap_c21_c22,
323 				s.gamut_remap_c23_c24,
324 				s.gamut_remap_c31_c32,
325 				s.gamut_remap_c33_c34);
326 		DTN_INFO("\n");
327 	}
328 	DTN_INFO("\n");
329 
330 	DTN_INFO("MPCC:  OPP  DPP  MPCCBOT  MODE  ALPHA_MODE  PREMULT  OVERLAP_ONLY  IDLE\n");
331 	for (i = 0; i < pool->pipe_count; i++) {
332 		struct mpcc_state s = {0};
333 
334 		pool->mpc->funcs->read_mpcc_state(pool->mpc, i, &s);
335 		if (s.opp_id != 0xf)
336 			DTN_INFO("[%2d]:  %2xh  %2xh  %6xh  %4d  %10d  %7d  %12d  %4d\n",
337 				i, s.opp_id, s.dpp_id, s.bot_mpcc_id,
338 				s.mode, s.alpha_mode, s.pre_multiplied_alpha, s.overlap_only,
339 				s.idle);
340 	}
341 	DTN_INFO("\n");
342 
343 	DTN_INFO("OTG:  v_bs  v_be  v_ss  v_se  vpol  vmax  vmin  vmax_sel  vmin_sel  h_bs  h_be  h_ss  h_se  hpol  htot  vtot  underflow blank_en\n");
344 
345 	for (i = 0; i < pool->timing_generator_count; i++) {
346 		struct timing_generator *tg = pool->timing_generators[i];
347 		struct dcn_otg_state s = {0};
348 		/* Read shared OTG state registers for all DCNx */
349 		optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s);
350 
351 		/*
352 		 * For DCN2 and greater, a register on the OPP is used to
353 		 * determine if the CRTC is blanked instead of the OTG. So use
354 		 * dpg_is_blanked() if it exists, otherwise fall back on the OTG.
355 		 *
356 		 * TODO: Implement DCN-specific read_otg_state hooks.
357 		 */
358 		if (pool->opps[i]->funcs->dpg_is_blanked)
359 			s.blank_enabled = pool->opps[i]->funcs->dpg_is_blanked(pool->opps[i]);
360 		else
361 			s.blank_enabled = tg->funcs->is_blanked(tg);
362 
363 		//only print if OTG master is enabled
364 		if ((s.otg_enabled & 1) == 0)
365 			continue;
366 
367 		DTN_INFO("[%d]: %5d %5d %5d %5d %5d %5d %5d %9d %9d %5d %5d %5d %5d %5d %5d %5d  %9d %8d\n",
368 				tg->inst,
369 				s.v_blank_start,
370 				s.v_blank_end,
371 				s.v_sync_a_start,
372 				s.v_sync_a_end,
373 				s.v_sync_a_pol,
374 				s.v_total_max,
375 				s.v_total_min,
376 				s.v_total_max_sel,
377 				s.v_total_min_sel,
378 				s.h_blank_start,
379 				s.h_blank_end,
380 				s.h_sync_a_start,
381 				s.h_sync_a_end,
382 				s.h_sync_a_pol,
383 				s.h_total,
384 				s.v_total,
385 				s.underflow_occurred_status,
386 				s.blank_enabled);
387 
388 		// Clear underflow for debug purposes
389 		// We want to keep underflow sticky bit on for the longevity tests outside of test environment.
390 		// This function is called only from Windows or Diags test environment, hence it's safe to clear
391 		// it from here without affecting the original intent.
392 		tg->funcs->clear_optc_underflow(tg);
393 	}
394 	DTN_INFO("\n");
395 
396 	DTN_INFO("DSC: CLOCK_EN  SLICE_WIDTH  Bytes_pp\n");
397 	for (i = 0; i < pool->res_cap->num_dsc; i++) {
398 		struct display_stream_compressor *dsc = pool->dscs[i];
399 		struct dcn_dsc_state s = {0};
400 
401 		dsc->funcs->dsc_read_state(dsc, &s);
402 		DTN_INFO("[%d]: %-9d %-12d %-10d\n",
403 		dsc->inst,
404 			s.dsc_clock_en,
405 			s.dsc_slice_width,
406 			s.dsc_bytes_per_pixel);
407 		DTN_INFO("\n");
408 	}
409 	DTN_INFO("\n");
410 
411 	DTN_INFO("S_ENC: DSC_MODE  SEC_GSP7_LINE_NUM"
412 			"  VBID6_LINE_REFERENCE  VBID6_LINE_NUM  SEC_GSP7_ENABLE  SEC_STREAM_ENABLE\n");
413 	for (i = 0; i < pool->stream_enc_count; i++) {
414 		struct stream_encoder *enc = pool->stream_enc[i];
415 		struct enc_state s = {0};
416 
417 		if (enc->funcs->enc_read_state) {
418 			enc->funcs->enc_read_state(enc, &s);
419 			DTN_INFO("[%-3d]: %-9d %-18d %-21d %-15d %-16d %-17d\n",
420 				enc->id,
421 				s.dsc_mode,
422 				s.sec_gsp_pps_line_num,
423 				s.vbid6_line_reference,
424 				s.vbid6_line_num,
425 				s.sec_gsp_pps_enable,
426 				s.sec_stream_enable);
427 			DTN_INFO("\n");
428 		}
429 	}
430 	DTN_INFO("\n");
431 
432 	DTN_INFO("L_ENC: DPHY_FEC_EN  DPHY_FEC_READY_SHADOW  DPHY_FEC_ACTIVE_STATUS  DP_LINK_TRAINING_COMPLETE\n");
433 	for (i = 0; i < dc->link_count; i++) {
434 		struct link_encoder *lenc = dc->links[i]->link_enc;
435 
436 		struct link_enc_state s = {0};
437 
438 		if (lenc->funcs->read_state) {
439 			lenc->funcs->read_state(lenc, &s);
440 			DTN_INFO("[%-3d]: %-12d %-22d %-22d %-25d\n",
441 				i,
442 				s.dphy_fec_en,
443 				s.dphy_fec_ready_shadow,
444 				s.dphy_fec_active_status,
445 				s.dp_link_training_complete);
446 			DTN_INFO("\n");
447 		}
448 	}
449 	DTN_INFO("\n");
450 
451 	DTN_INFO("\nCALCULATED Clocks: dcfclk_khz:%d  dcfclk_deep_sleep_khz:%d  dispclk_khz:%d\n"
452 		"dppclk_khz:%d  max_supported_dppclk_khz:%d  fclk_khz:%d  socclk_khz:%d\n\n",
453 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_khz,
454 			dc->current_state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz,
455 			dc->current_state->bw_ctx.bw.dcn.clk.dispclk_khz,
456 			dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz,
457 			dc->current_state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz,
458 			dc->current_state->bw_ctx.bw.dcn.clk.fclk_khz,
459 			dc->current_state->bw_ctx.bw.dcn.clk.socclk_khz);
460 
461 	log_mpc_crc(dc, log_ctx);
462 
463 	DTN_INFO_END();
464 }
465 
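/*
 * Return true if the OTG or the HUBP of this pipe recorded an underflow,
 * clearing the sticky status in whichever block reported it.
 */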
466 bool dcn10_did_underflow_occur(struct dc *dc, struct pipe_ctx *pipe_ctx)
467 {
468 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
469 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
470 
471 	if (tg->funcs->is_optc_underflow_occurred(tg)) {
472 		tg->funcs->clear_optc_underflow(tg);
473 		return true;
474 	}
475 
476 	if (hubp->funcs->hubp_get_underflow_status(hubp)) {
477 		hubp->funcs->hubp_clear_underflow(hubp);
478 		return true;
479 	}
480 	return false;
481 }
482 
483 void dcn10_enable_power_gating_plane(
484 	struct dce_hwseq *hws,
485 	bool enable)
486 {
487 	bool force_on = true; /* disable power gating */
488 
489 	if (enable)
490 		force_on = false;
491 
492 	/* DCHUBP0/1/2/3 */
493 	REG_UPDATE(DOMAIN0_PG_CONFIG, DOMAIN0_POWER_FORCEON, force_on);
494 	REG_UPDATE(DOMAIN2_PG_CONFIG, DOMAIN2_POWER_FORCEON, force_on);
495 	REG_UPDATE(DOMAIN4_PG_CONFIG, DOMAIN4_POWER_FORCEON, force_on);
496 	REG_UPDATE(DOMAIN6_PG_CONFIG, DOMAIN6_POWER_FORCEON, force_on);
497 
498 	/* DPP0/1/2/3 */
499 	REG_UPDATE(DOMAIN1_PG_CONFIG, DOMAIN1_POWER_FORCEON, force_on);
500 	REG_UPDATE(DOMAIN3_PG_CONFIG, DOMAIN3_POWER_FORCEON, force_on);
501 	REG_UPDATE(DOMAIN5_PG_CONFIG, DOMAIN5_POWER_FORCEON, force_on);
502 	REG_UPDATE(DOMAIN7_PG_CONFIG, DOMAIN7_POWER_FORCEON, force_on);
503 }
504 
505 void dcn10_disable_vga(
506 	struct dce_hwseq *hws)
507 {
508 	unsigned int in_vga1_mode = 0;
509 	unsigned int in_vga2_mode = 0;
510 	unsigned int in_vga3_mode = 0;
511 	unsigned int in_vga4_mode = 0;
512 
513 	REG_GET(D1VGA_CONTROL, D1VGA_MODE_ENABLE, &in_vga1_mode);
514 	REG_GET(D2VGA_CONTROL, D2VGA_MODE_ENABLE, &in_vga2_mode);
515 	REG_GET(D3VGA_CONTROL, D3VGA_MODE_ENABLE, &in_vga3_mode);
516 	REG_GET(D4VGA_CONTROL, D4VGA_MODE_ENABLE, &in_vga4_mode);
517 
518 	if (in_vga1_mode == 0 && in_vga2_mode == 0 &&
519 			in_vga3_mode == 0 && in_vga4_mode == 0)
520 		return;
521 
522 	REG_WRITE(D1VGA_CONTROL, 0);
523 	REG_WRITE(D2VGA_CONTROL, 0);
524 	REG_WRITE(D3VGA_CONTROL, 0);
525 	REG_WRITE(D4VGA_CONTROL, 0);
526 
527 	/* HW Engineer's Notes:
528 	 *  During switch from vga->extended, if we set the VGA_TEST_ENABLE and
529 	 *  then hit the VGA_TEST_RENDER_START, then the DCHUBP timing gets updated correctly.
530 	 *
531 	 *  Then vBIOS will have it poll for the VGA_TEST_RENDER_DONE and unset
532 	 *  VGA_TEST_ENABLE, to leave it in the same state as before.
533 	 */
534 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_ENABLE, 1);
535 	REG_UPDATE(VGA_TEST_CONTROL, VGA_TEST_RENDER_START, 1);
536 }
537 
538 void dcn10_dpp_pg_control(
539 		struct dce_hwseq *hws,
540 		unsigned int dpp_inst,
541 		bool power_on)
542 {
543 	uint32_t power_gate = power_on ? 0 : 1;
544 	uint32_t pwr_status = power_on ? 0 : 2;
545 
546 	if (hws->ctx->dc->debug.disable_dpp_power_gate)
547 		return;
548 	if (REG(DOMAIN1_PG_CONFIG) == 0)
549 		return;
550 
551 	switch (dpp_inst) {
552 	case 0: /* DPP0 */
553 		REG_UPDATE(DOMAIN1_PG_CONFIG,
554 				DOMAIN1_POWER_GATE, power_gate);
555 
556 		REG_WAIT(DOMAIN1_PG_STATUS,
557 				DOMAIN1_PGFSM_PWR_STATUS, pwr_status,
558 				1, 1000);
559 		break;
560 	case 1: /* DPP1 */
561 		REG_UPDATE(DOMAIN3_PG_CONFIG,
562 				DOMAIN3_POWER_GATE, power_gate);
563 
564 		REG_WAIT(DOMAIN3_PG_STATUS,
565 				DOMAIN3_PGFSM_PWR_STATUS, pwr_status,
566 				1, 1000);
567 		break;
568 	case 2: /* DPP2 */
569 		REG_UPDATE(DOMAIN5_PG_CONFIG,
570 				DOMAIN5_POWER_GATE, power_gate);
571 
572 		REG_WAIT(DOMAIN5_PG_STATUS,
573 				DOMAIN5_PGFSM_PWR_STATUS, pwr_status,
574 				1, 1000);
575 		break;
576 	case 3: /* DPP3 */
577 		REG_UPDATE(DOMAIN7_PG_CONFIG,
578 				DOMAIN7_POWER_GATE, power_gate);
579 
580 		REG_WAIT(DOMAIN7_PG_STATUS,
581 				DOMAIN7_PGFSM_PWR_STATUS, pwr_status,
582 				1, 1000);
583 		break;
584 	default:
585 		BREAK_TO_DEBUGGER();
586 		break;
587 	}
588 }
589 
590 void dcn10_hubp_pg_control(
591 		struct dce_hwseq *hws,
592 		unsigned int hubp_inst,
593 		bool power_on)
594 {
595 	uint32_t power_gate = power_on ? 0 : 1;
596 	uint32_t pwr_status = power_on ? 0 : 2;
597 
598 	if (hws->ctx->dc->debug.disable_hubp_power_gate)
599 		return;
600 	if (REG(DOMAIN0_PG_CONFIG) == 0)
601 		return;
602 
603 	switch (hubp_inst) {
604 	case 0: /* DCHUBP0 */
605 		REG_UPDATE(DOMAIN0_PG_CONFIG,
606 				DOMAIN0_POWER_GATE, power_gate);
607 
608 		REG_WAIT(DOMAIN0_PG_STATUS,
609 				DOMAIN0_PGFSM_PWR_STATUS, pwr_status,
610 				1, 1000);
611 		break;
612 	case 1: /* DCHUBP1 */
613 		REG_UPDATE(DOMAIN2_PG_CONFIG,
614 				DOMAIN2_POWER_GATE, power_gate);
615 
616 		REG_WAIT(DOMAIN2_PG_STATUS,
617 				DOMAIN2_PGFSM_PWR_STATUS, pwr_status,
618 				1, 1000);
619 		break;
620 	case 2: /* DCHUBP2 */
621 		REG_UPDATE(DOMAIN4_PG_CONFIG,
622 				DOMAIN4_POWER_GATE, power_gate);
623 
624 		REG_WAIT(DOMAIN4_PG_STATUS,
625 				DOMAIN4_PGFSM_PWR_STATUS, pwr_status,
626 				1, 1000);
627 		break;
628 	case 3: /* DCHUBP3 */
629 		REG_UPDATE(DOMAIN6_PG_CONFIG,
630 				DOMAIN6_POWER_GATE, power_gate);
631 
632 		REG_WAIT(DOMAIN6_PG_STATUS,
633 				DOMAIN6_PGFSM_PWR_STATUS, pwr_status,
634 				1, 1000);
635 		break;
636 	default:
637 		BREAK_TO_DEBUGGER();
638 		break;
639 	}
640 }
641 
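/* Un-gate the DPP and HUBP power domains for the front end of a pipe. */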
642 static void power_on_plane(
643 	struct dce_hwseq *hws,
644 	int plane_id)
645 {
646 	DC_LOGGER_INIT(hws->ctx->logger);
647 	if (REG(DC_IP_REQUEST_CNTL)) {
648 		REG_SET(DC_IP_REQUEST_CNTL, 0,
649 				IP_REQUEST_EN, 1);
650 		hws->funcs.dpp_pg_control(hws, plane_id, true);
651 		hws->funcs.hubp_pg_control(hws, plane_id, true);
652 		REG_SET(DC_IP_REQUEST_CNTL, 0,
653 				IP_REQUEST_EN, 0);
654 		DC_LOG_DEBUG(
655 				"Un-gated front end for pipe %d\n", plane_id);
656 	}
657 }
658 
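/*
 * DEGVIDCN10_253 workaround: when every HUBP is power gated, HUBP0 is kept
 * powered and unblanked so stutter can still be enabled (apply below).
 * This undo step blanks HUBP0 and power gates it again.
 */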
659 static void undo_DEGVIDCN10_253_wa(struct dc *dc)
660 {
661 	struct dce_hwseq *hws = dc->hwseq;
662 	struct hubp *hubp = dc->res_pool->hubps[0];
663 
664 	if (!hws->wa_state.DEGVIDCN10_253_applied)
665 		return;
666 
667 	hubp->funcs->set_blank(hubp, true);
668 
669 	REG_SET(DC_IP_REQUEST_CNTL, 0,
670 			IP_REQUEST_EN, 1);
671 
672 	hws->funcs.hubp_pg_control(hws, 0, false);
673 	REG_SET(DC_IP_REQUEST_CNTL, 0,
674 			IP_REQUEST_EN, 0);
675 
676 	hws->wa_state.DEGVIDCN10_253_applied = false;
677 }
678 
679 static void apply_DEGVIDCN10_253_wa(struct dc *dc)
680 {
681 	struct dce_hwseq *hws = dc->hwseq;
682 	struct hubp *hubp = dc->res_pool->hubps[0];
683 	int i;
684 
685 	if (dc->debug.disable_stutter)
686 		return;
687 
688 	if (!hws->wa.DEGVIDCN10_253)
689 		return;
690 
691 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
692 		if (!dc->res_pool->hubps[i]->power_gated)
693 			return;
694 	}
695 
696 	/* all pipes are power gated; apply the workaround to enable stutter. */
697 
698 	REG_SET(DC_IP_REQUEST_CNTL, 0,
699 			IP_REQUEST_EN, 1);
700 
701 	hws->funcs.hubp_pg_control(hws, 0, true);
702 	REG_SET(DC_IP_REQUEST_CNTL, 0,
703 			IP_REQUEST_EN, 0);
704 
705 	hubp->funcs->set_hubp_blank_en(hubp, false);
706 	hws->wa_state.DEGVIDCN10_253_applied = true;
707 }
708 
709 void dcn10_bios_golden_init(struct dc *dc)
710 {
711 	struct dce_hwseq *hws = dc->hwseq;
712 	struct dc_bios *bp = dc->ctx->dc_bios;
713 	int i;
714 	bool allow_self_fresh_force_enable = true;
715 
716 	if (hws->funcs.s0i3_golden_init_wa && hws->funcs.s0i3_golden_init_wa(dc))
717 		return;
718 
719 	if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled)
720 		allow_self_fresh_force_enable =
721 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub);
722 
723 
724 	/* WA for making the DF sleep when idle after resume from S0i3.
725 	 * The command table sets DCHUBBUB_ARB_ALLOW_SELF_REFRESH_FORCE_ENABLE
726 	 * to 1.  If it was 0 before the command table was called and it
727 	 * changed to 1 afterwards, it should be set back to 0.
729 	 */
730 
731 	/* initialize dcn global */
732 	bp->funcs->enable_disp_power_gating(bp,
733 			CONTROLLER_ID_D0, ASIC_PIPE_INIT);
734 
735 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
736 		/* initialize dcn per pipe */
737 		bp->funcs->enable_disp_power_gating(bp,
738 				CONTROLLER_ID_D0 + i, ASIC_PIPE_DISABLE);
739 	}
740 
741 	if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
742 		if (allow_self_fresh_force_enable == false &&
743 				dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub))
744 			dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, true);
745 
746 }
747 
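/*
 * Workaround for a spurious OPTC underflow: wait for pending MPCC
 * disconnects on this stream's pipes, enable blank-data double buffering,
 * and clear any underflow that only appeared while this sequence ran.
 */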
748 static void false_optc_underflow_wa(
749 		struct dc *dc,
750 		const struct dc_stream_state *stream,
751 		struct timing_generator *tg)
752 {
753 	int i;
754 	bool underflow;
755 
756 	if (!dc->hwseq->wa.false_optc_underflow)
757 		return;
758 
759 	underflow = tg->funcs->is_optc_underflow_occurred(tg);
760 
761 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
762 		struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
763 
764 		if (old_pipe_ctx->stream != stream)
765 			continue;
766 
767 		dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, old_pipe_ctx);
768 	}
769 
770 	if (tg->funcs->set_blank_data_double_buffer)
771 		tg->funcs->set_blank_data_double_buffer(tg, true);
772 
773 	if (tg->funcs->is_optc_underflow_occurred(tg) && !underflow)
774 		tg->funcs->clear_optc_underflow(tg);
775 }
776 
777 enum dc_status dcn10_enable_stream_timing(
778 		struct pipe_ctx *pipe_ctx,
779 		struct dc_state *context,
780 		struct dc *dc)
781 {
782 	struct dc_stream_state *stream = pipe_ctx->stream;
783 	enum dc_color_space color_space;
784 	struct tg_color black_color = {0};
785 
786 	/* The caller's loop visits pipe 0, the parent pipe, first, so the back
787 	 * end is set up for pipe 0. Child pipes share the back end with pipe 0,
788 	 * so no programming is needed for them.
789 	 */
790 	if (pipe_ctx->top_pipe != NULL)
791 		return DC_OK;
792 
793 	/* TODO check if timing_changed, disable stream if timing changed */
794 
795 	/* The HW programming guide assumes the display was already disabled
796 	 * by the unplug sequence and that the OTG is stopped.
797 	 */
798 	pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, true);
799 
800 	if (false == pipe_ctx->clock_source->funcs->program_pix_clk(
801 			pipe_ctx->clock_source,
802 			&pipe_ctx->stream_res.pix_clk_params,
803 			&pipe_ctx->pll_settings)) {
804 		BREAK_TO_DEBUGGER();
805 		return DC_ERROR_UNEXPECTED;
806 	}
807 
808 	pipe_ctx->stream_res.tg->funcs->program_timing(
809 			pipe_ctx->stream_res.tg,
810 			&stream->timing,
811 			pipe_ctx->pipe_dlg_param.vready_offset,
812 			pipe_ctx->pipe_dlg_param.vstartup_start,
813 			pipe_ctx->pipe_dlg_param.vupdate_offset,
814 			pipe_ctx->pipe_dlg_param.vupdate_width,
815 			pipe_ctx->stream->signal,
816 			true);
817 
818 #if 0 /* move to after enable_crtc */
819 	/* TODO: OPP FMT, ABM. etc. should be done here. */
820 	/* or FPGA now. instance 0 only. TODO: move to opp.c */
821 
822 	inst_offset = reg_offsets[pipe_ctx->stream_res.tg->inst].fmt;
823 
824 	pipe_ctx->stream_res.opp->funcs->opp_program_fmt(
825 				pipe_ctx->stream_res.opp,
826 				&stream->bit_depth_params,
827 				&stream->clamping);
828 #endif
829 	/* program otg blank color */
830 	color_space = stream->output_color_space;
831 	color_space_to_black_color(dc, color_space, &black_color);
832 
833 	if (pipe_ctx->stream_res.tg->funcs->set_blank_color)
834 		pipe_ctx->stream_res.tg->funcs->set_blank_color(
835 				pipe_ctx->stream_res.tg,
836 				&black_color);
837 
838 	if (pipe_ctx->stream_res.tg->funcs->is_blanked &&
839 			!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) {
840 		pipe_ctx->stream_res.tg->funcs->set_blank(pipe_ctx->stream_res.tg, true);
841 		hwss_wait_for_blank_complete(pipe_ctx->stream_res.tg);
842 		false_optc_underflow_wa(dc, pipe_ctx->stream, pipe_ctx->stream_res.tg);
843 	}
844 
845 	/* VTG is within the DCHUB command block. DCFCLK is always on */
846 	if (false == pipe_ctx->stream_res.tg->funcs->enable_crtc(pipe_ctx->stream_res.tg)) {
847 		BREAK_TO_DEBUGGER();
848 		return DC_ERROR_UNEXPECTED;
849 	}
850 
851 	/* TODO program crtc source select for non-virtual signal*/
852 	/* TODO program FMT */
853 	/* TODO setup link_enc */
854 	/* TODO set stream attributes */
855 	/* TODO program audio */
856 	/* TODO enable stream if timing changed */
857 	/* TODO unblank stream if DP */
858 
859 	return DC_OK;
860 }
861 
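/*
 * Tear down the back end of one pipe: disable the stream and audio on its
 * link and, for the top pipe only, stop the OTG before clearing the pipe's
 * stream pointer.
 */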
862 static void dcn10_reset_back_end_for_pipe(
863 		struct dc *dc,
864 		struct pipe_ctx *pipe_ctx,
865 		struct dc_state *context)
866 {
867 	int i;
868 	struct dc_link *link;
869 	DC_LOGGER_INIT(dc->ctx->logger);
870 	if (pipe_ctx->stream_res.stream_enc == NULL) {
871 		pipe_ctx->stream = NULL;
872 		return;
873 	}
874 
875 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
876 		link = pipe_ctx->stream->link;
877 		/* DPMS may already have disabled the stream, or the
878 		 * dpms_off status may be incorrect due to the fastboot
879 		 * feature: when the system resumes from S4 with only a
880 		 * second screen, dpms_off would be true even though the
881 		 * VBIOS lit up the eDP, so check the link status too.
882 		 */
883 		if (!pipe_ctx->stream->dpms_off || link->link_status.link_active)
884 			core_link_disable_stream(pipe_ctx);
885 		else if (pipe_ctx->stream_res.audio)
886 			dc->hwss.disable_audio_stream(pipe_ctx);
887 
888 		if (pipe_ctx->stream_res.audio) {
889 			/*disable az_endpoint*/
890 			pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
891 
892 			/*free audio*/
893 			if (dc->caps.dynamic_audio == true) {
894 				/*we have to dynamically arbitrate the audio endpoints*/
895 				/*we free the resource and need to reset is_audio_acquired*/
896 				update_audio_usage(&dc->current_state->res_ctx, dc->res_pool,
897 						pipe_ctx->stream_res.audio, false);
898 				pipe_ctx->stream_res.audio = NULL;
899 			}
900 		}
901 	}
902 
903 	/* The caller's loop resets the parent pipe (pipe 0) last. The back end
904 	 * is shared by all pipes and is only disabled when the parent pipe is
905 	 * disabled.
906 	 */
907 	if (pipe_ctx->top_pipe == NULL) {
908 		pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
909 
910 		pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
911 		if (pipe_ctx->stream_res.tg->funcs->set_drr)
912 			pipe_ctx->stream_res.tg->funcs->set_drr(
913 					pipe_ctx->stream_res.tg, NULL);
914 	}
915 
916 	for (i = 0; i < dc->res_pool->pipe_count; i++)
917 		if (&dc->current_state->res_ctx.pipe_ctx[i] == pipe_ctx)
918 			break;
919 
920 	if (i == dc->res_pool->pipe_count)
921 		return;
922 
923 	pipe_ctx->stream = NULL;
924 	DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
925 					pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
926 }
927 
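/*
 * Forced recovery after a HUBP underflow: blank every HUBP, soft reset the
 * HUBBUB and toggle HUBP_DISABLE, following the register sequence
 * documented inside.
 */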
928 static bool dcn10_hw_wa_force_recovery(struct dc *dc)
929 {
930 	struct hubp *hubp;
931 	unsigned int i;
932 	bool need_recover = false;
933 
934 	if (!dc->debug.recovery_enabled)
935 		return false;
936 
937 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
938 		struct pipe_ctx *pipe_ctx =
939 			&dc->current_state->res_ctx.pipe_ctx[i];
940 		if (pipe_ctx != NULL) {
941 			hubp = pipe_ctx->plane_res.hubp;
942 			if (hubp != NULL && hubp->funcs->hubp_get_underflow_status) {
943 				if (hubp->funcs->hubp_get_underflow_status(hubp) != 0) {
944 					/* one pipe underflow, we will reset all the pipes*/
945 					need_recover = true;
946 				}
947 			}
948 		}
949 	}
950 	if (!need_recover)
951 		return false;
952 	/*
953 	DCHUBP_CNTL:HUBP_BLANK_EN=1
954 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1
955 	DCHUBP_CNTL:HUBP_DISABLE=1
956 	DCHUBP_CNTL:HUBP_DISABLE=0
957 	DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0
958 	DCSURF_PRIMARY_SURFACE_ADDRESS
959 	DCHUBP_CNTL:HUBP_BLANK_EN=0
960 	*/
961 
962 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
963 		struct pipe_ctx *pipe_ctx =
964 			&dc->current_state->res_ctx.pipe_ctx[i];
965 		if (pipe_ctx != NULL) {
966 			hubp = pipe_ctx->plane_res.hubp;
967 			/*DCHUBP_CNTL:HUBP_BLANK_EN=1*/
968 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
969 				hubp->funcs->set_hubp_blank_en(hubp, true);
970 		}
971 	}
972 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=1*/
973 	hubbub1_soft_reset(dc->res_pool->hubbub, true);
974 
975 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
976 		struct pipe_ctx *pipe_ctx =
977 			&dc->current_state->res_ctx.pipe_ctx[i];
978 		if (pipe_ctx != NULL) {
979 			hubp = pipe_ctx->plane_res.hubp;
980 			/*DCHUBP_CNTL:HUBP_DISABLE=1*/
981 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
982 				hubp->funcs->hubp_disable_control(hubp, true);
983 		}
984 	}
985 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
986 		struct pipe_ctx *pipe_ctx =
987 			&dc->current_state->res_ctx.pipe_ctx[i];
988 		if (pipe_ctx != NULL) {
989 			hubp = pipe_ctx->plane_res.hubp;
990 			/*DCHUBP_CNTL:HUBP_DISABLE=0*/
991 			if (hubp != NULL && hubp->funcs->hubp_disable_control)
992 				hubp->funcs->hubp_disable_control(hubp, false);
993 		}
994 	}
995 	/*DCHUBBUB_SOFT_RESET:DCHUBBUB_GLOBAL_SOFT_RESET=0*/
996 	hubbub1_soft_reset(dc->res_pool->hubbub, false);
997 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
998 		struct pipe_ctx *pipe_ctx =
999 			&dc->current_state->res_ctx.pipe_ctx[i];
1000 		if (pipe_ctx != NULL) {
1001 			hubp = pipe_ctx->plane_res.hubp;
1002 			/*DCHUBP_CNTL:HUBP_BLANK_EN=0*/
1003 			if (hubp != NULL && hubp->funcs->set_hubp_blank_en)
1004 				hubp->funcs->set_hubp_blank_en(hubp, false);
1005 		}
1006 	}
1007 	return true;
1008 
1009 }
1010 
1011 
1012 void dcn10_verify_allow_pstate_change_high(struct dc *dc)
1013 {
1014 	static bool should_log_hw_state; /* prevent hw state log by default */
1015 
1016 	if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub)) {
1017 		if (should_log_hw_state) {
1018 			dcn10_log_hw_state(dc, NULL);
1019 		}
1020 		BREAK_TO_DEBUGGER();
1021 		if (dcn10_hw_wa_force_recovery(dc)) {
1022 		/*check again*/
1023 			if (!hubbub1_verify_allow_pstate_change_high(dc->res_pool->hubbub))
1024 				BREAK_TO_DEBUGGER();
1025 		}
1026 	}
1027 }
1028 
1029 /* trigger HW to start disconnecting the plane from the stream on the next vsync */
1030 void dcn10_plane_atomic_disconnect(struct dc *dc, struct pipe_ctx *pipe_ctx)
1031 {
1032 	struct dce_hwseq *hws = dc->hwseq;
1033 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1034 	int dpp_id = pipe_ctx->plane_res.dpp->inst;
1035 	struct mpc *mpc = dc->res_pool->mpc;
1036 	struct mpc_tree *mpc_tree_params;
1037 	struct mpcc *mpcc_to_remove = NULL;
1038 	struct output_pixel_processor *opp = pipe_ctx->stream_res.opp;
1039 
1040 	mpc_tree_params = &(opp->mpc_tree_params);
1041 	mpcc_to_remove = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, dpp_id);
1042 
1043 	/*Already reset*/
1044 	if (mpcc_to_remove == NULL)
1045 		return;
1046 
1047 	mpc->funcs->remove_mpcc(mpc, mpc_tree_params, mpcc_to_remove);
1048 	if (opp != NULL)
1049 		opp->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1050 
1051 	dc->optimized_required = true;
1052 
1053 	if (hubp->funcs->hubp_disconnect)
1054 		hubp->funcs->hubp_disconnect(hubp);
1055 
1056 	if (dc->debug.sanity_checks)
1057 		hws->funcs.verify_allow_pstate_change_high(dc);
1058 }
1059 
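/* Power gate the DPP and HUBP of a plane and reset the DPP. */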
1060 void dcn10_plane_atomic_power_down(struct dc *dc,
1061 		struct dpp *dpp,
1062 		struct hubp *hubp)
1063 {
1064 	struct dce_hwseq *hws = dc->hwseq;
1065 	DC_LOGGER_INIT(dc->ctx->logger);
1066 
1067 	if (REG(DC_IP_REQUEST_CNTL)) {
1068 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1069 				IP_REQUEST_EN, 1);
1070 		hws->funcs.dpp_pg_control(hws, dpp->inst, false);
1071 		hws->funcs.hubp_pg_control(hws, hubp->inst, false);
1072 		dpp->funcs->dpp_reset(dpp);
1073 		REG_SET(DC_IP_REQUEST_CNTL, 0,
1074 				IP_REQUEST_EN, 0);
1075 		DC_LOG_DEBUG(
1076 				"Power gated front end %d\n", hubp->inst);
1077 	}
1078 }
1079 
1080 /* Disable the HW used by the plane.
1081  * Note: this cannot be done until the disconnect is complete.
1082  */
1083 void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
1084 {
1085 	struct dce_hwseq *hws = dc->hwseq;
1086 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
1087 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1088 	int opp_id = hubp->opp_id;
1089 
1090 	dc->hwss.wait_for_mpcc_disconnect(dc, dc->res_pool, pipe_ctx);
1091 
1092 	hubp->funcs->hubp_clk_cntl(hubp, false);
1093 
1094 	dpp->funcs->dpp_dppclk_control(dpp, false, false);
1095 
1096 	if (opp_id != 0xf && pipe_ctx->stream_res.opp->mpc_tree_params.opp_list == NULL)
1097 		pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1098 				pipe_ctx->stream_res.opp,
1099 				false);
1100 
1101 	hubp->power_gated = true;
1102 	dc->optimized_required = false; /* We're powering off, no need to optimize */
1103 
1104 	hws->funcs.plane_atomic_power_down(dc,
1105 			pipe_ctx->plane_res.dpp,
1106 			pipe_ctx->plane_res.hubp);
1107 
1108 	pipe_ctx->stream = NULL;
1109 	memset(&pipe_ctx->stream_res, 0, sizeof(pipe_ctx->stream_res));
1110 	memset(&pipe_ctx->plane_res, 0, sizeof(pipe_ctx->plane_res));
1111 	pipe_ctx->top_pipe = NULL;
1112 	pipe_ctx->bottom_pipe = NULL;
1113 	pipe_ctx->plane_state = NULL;
1114 }
1115 
1116 void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx)
1117 {
1118 	struct dce_hwseq *hws = dc->hwseq;
1119 	DC_LOGGER_INIT(dc->ctx->logger);
1120 
1121 	if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
1122 		return;
1123 
1124 	hws->funcs.plane_atomic_disable(dc, pipe_ctx);
1125 
1126 	apply_DEGVIDCN10_253_wa(dc);
1127 
1128 	DC_LOG_DC("Power down front end %d\n",
1129 					pipe_ctx->pipe_idx);
1130 }
1131 
1132 void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
1133 {
1134 	int i;
1135 	struct dce_hwseq *hws = dc->hwseq;
1136 	bool can_apply_seamless_boot = false;
1137 
1138 	for (i = 0; i < context->stream_count; i++) {
1139 		if (context->streams[i]->apply_seamless_boot_optimization) {
1140 			can_apply_seamless_boot = true;
1141 			break;
1142 		}
1143 	}
1144 
1145 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1146 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1147 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1148 
1149 		/* The assumption is that pipe_ctx does not map irregularly to a
1150 		 * non-preferred front end. If pipe_ctx->stream is not NULL, we
1151 		 * will use the pipe, so don't disable it.
1152 		 */
1153 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1154 			continue;
1155 
1156 		/* Blank controller using driver code instead of
1157 		 * command table.
1158 		 */
1159 		if (tg->funcs->is_tg_enabled(tg)) {
1160 			if (hws->funcs.init_blank != NULL) {
1161 				hws->funcs.init_blank(dc, tg);
1162 				tg->funcs->lock(tg);
1163 			} else {
1164 				tg->funcs->lock(tg);
1165 				tg->funcs->set_blank(tg, true);
1166 				hwss_wait_for_blank_complete(tg);
1167 			}
1168 		}
1169 	}
1170 
1171 	/* num_opp will be equal to number of mpcc */
1172 	for (i = 0; i < dc->res_pool->res_cap->num_opp; i++) {
1173 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1174 
1175 		/* Cannot reset the MPC mux if seamless boot */
1176 		if (pipe_ctx->stream != NULL && can_apply_seamless_boot)
1177 			continue;
1178 
1179 		dc->res_pool->mpc->funcs->mpc_init_single_inst(
1180 				dc->res_pool->mpc, i);
1181 	}
1182 
1183 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
1184 		struct timing_generator *tg = dc->res_pool->timing_generators[i];
1185 		struct hubp *hubp = dc->res_pool->hubps[i];
1186 		struct dpp *dpp = dc->res_pool->dpps[i];
1187 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1188 
1189 		/* The assumption is that pipe_ctx does not map irregularly to a
1190 		 * non-preferred front end. If pipe_ctx->stream is not NULL, we
1191 		 * will use the pipe, so don't disable it.
1192 		 */
1193 		if (can_apply_seamless_boot &&
1194 			pipe_ctx->stream != NULL &&
1195 			pipe_ctx->stream_res.tg->funcs->is_tg_enabled(
1196 				pipe_ctx->stream_res.tg)) {
1197 			// Enable double buffering for OTG_BLANK no matter if
1198 			// seamless boot is enabled or not to suppress global sync
1199 			// signals when OTG blanked. This is to prevent pipe from
1200 			// requesting data while in PSR.
1201 			tg->funcs->tg_init(tg);
1202 			continue;
1203 		}
1204 
1205 		/* Disable on the current state so the new one isn't cleared. */
1206 		pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
1207 
1208 		dpp->funcs->dpp_reset(dpp);
1209 
1210 		pipe_ctx->stream_res.tg = tg;
1211 		pipe_ctx->pipe_idx = i;
1212 
1213 		pipe_ctx->plane_res.hubp = hubp;
1214 		pipe_ctx->plane_res.dpp = dpp;
1215 		pipe_ctx->plane_res.mpcc_inst = dpp->inst;
1216 		hubp->mpcc_id = dpp->inst;
1217 		hubp->opp_id = OPP_ID_INVALID;
1218 		hubp->power_gated = false;
1219 
1220 		dc->res_pool->opps[i]->mpc_tree_params.opp_id = dc->res_pool->opps[i]->inst;
1221 		dc->res_pool->opps[i]->mpc_tree_params.opp_list = NULL;
1222 		dc->res_pool->opps[i]->mpcc_disconnect_pending[pipe_ctx->plane_res.mpcc_inst] = true;
1223 		pipe_ctx->stream_res.opp = dc->res_pool->opps[i];
1224 
1225 		hws->funcs.plane_atomic_disconnect(dc, pipe_ctx);
1226 
1227 		if (tg->funcs->is_tg_enabled(tg))
1228 			tg->funcs->unlock(tg);
1229 
1230 		dc->hwss.disable_plane(dc, pipe_ctx);
1231 
1232 		pipe_ctx->stream_res.tg = NULL;
1233 		pipe_ctx->plane_res.hubp = NULL;
1234 
1235 		tg->funcs->tg_init(tg);
1236 	}
1237 }
1238 
1239 void dcn10_init_hw(struct dc *dc)
1240 {
1241 	int i;
1242 	struct abm *abm = dc->res_pool->abm;
1243 	struct dmcu *dmcu = dc->res_pool->dmcu;
1244 	struct dce_hwseq *hws = dc->hwseq;
1245 	struct dc_bios *dcb = dc->ctx->dc_bios;
1246 	struct resource_pool *res_pool = dc->res_pool;
1247 
1248 	if (dc->clk_mgr && dc->clk_mgr->funcs->init_clocks)
1249 		dc->clk_mgr->funcs->init_clocks(dc->clk_mgr);
1250 
1251 	// Initialize the dccg
1252 	if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->dccg_init)
1253 		dc->res_pool->dccg->funcs->dccg_init(res_pool->dccg);
1254 
1255 	if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1256 
1257 		REG_WRITE(REFCLK_CNTL, 0);
1258 		REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
1259 		REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1260 
1261 		if (!dc->debug.disable_clock_gate) {
1262 			/* enable all DCN clock gating */
1263 			REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1264 
1265 			REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1266 
1267 			REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1268 		}
1269 
1270 		//Enable ability to power gate / don't force power on permanently
1271 		hws->funcs.enable_power_gating_plane(hws, true);
1272 
1273 		return;
1274 	}
1275 
1276 	if (!dcb->funcs->is_accelerated_mode(dcb))
1277 		hws->funcs.disable_vga(dc->hwseq);
1278 
1279 	hws->funcs.bios_golden_init(dc);
1280 	if (dc->ctx->dc_bios->fw_info_valid) {
1281 		res_pool->ref_clocks.xtalin_clock_inKhz =
1282 				dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency;
1283 
1284 		if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
1285 			if (res_pool->dccg && res_pool->hubbub) {
1286 
1287 				(res_pool->dccg->funcs->get_dccg_ref_freq)(res_pool->dccg,
1288 						dc->ctx->dc_bios->fw_info.pll_info.crystal_frequency,
1289 						&res_pool->ref_clocks.dccg_ref_clock_inKhz);
1290 
1291 				(res_pool->hubbub->funcs->get_dchub_ref_freq)(res_pool->hubbub,
1292 						res_pool->ref_clocks.dccg_ref_clock_inKhz,
1293 						&res_pool->ref_clocks.dchub_ref_clock_inKhz);
1294 			} else {
1295 				// Not all ASICs have DCCG sw component
1296 				res_pool->ref_clocks.dccg_ref_clock_inKhz =
1297 						res_pool->ref_clocks.xtalin_clock_inKhz;
1298 				res_pool->ref_clocks.dchub_ref_clock_inKhz =
1299 						res_pool->ref_clocks.xtalin_clock_inKhz;
1300 			}
1301 		}
1302 	} else
1303 		ASSERT_CRITICAL(false);
1304 
1305 	for (i = 0; i < dc->link_count; i++) {
1306 		/* Power up AND update implementation according to the
1307 		 * required signal (which may be different from the
1308 		 * default signal on connector).
1309 		 */
1310 		struct dc_link *link = dc->links[i];
1311 
1312 		link->link_enc->funcs->hw_init(link->link_enc);
1313 
1314 		/* Check for enabled DIG to identify enabled display */
1315 		if (link->link_enc->funcs->is_dig_enabled &&
1316 			link->link_enc->funcs->is_dig_enabled(link->link_enc))
1317 			link->link_status.link_active = true;
1318 	}
1319 
1320 	/* Power gate DSCs */
1321 	for (i = 0; i < res_pool->res_cap->num_dsc; i++)
1322 		if (hws->funcs.dsc_pg_control != NULL)
1323 			hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
1324 
1325 	/* If taking control over from VBIOS, we may want to optimize our first
1326 	 * mode set, so we need to skip powering down pipes until we know which
1327 	 * pipes we want to use.
1328 	 * Otherwise, if taking control is not possible, we need to power
1329 	 * everything down.
1330 	 */
1331 	if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
1332 		hws->funcs.init_pipes(dc, dc->current_state);
1333 	}
1334 
1335 	for (i = 0; i < res_pool->audio_count; i++) {
1336 		struct audio *audio = res_pool->audios[i];
1337 
1338 		audio->funcs->hw_init(audio);
1339 	}
1340 
1341 	if (abm != NULL) {
1342 		abm->funcs->init_backlight(abm);
1343 		abm->funcs->abm_init(abm);
1344 	}
1345 
1346 	if (dmcu != NULL && !dmcu->auto_load_dmcu)
1347 		dmcu->funcs->dmcu_init(dmcu);
1348 
1349 	if (abm != NULL && dmcu != NULL)
1350 		abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu);
1351 
1352 	/* power AFMT HDMI memory TODO: may move to dis/en output save power*/
1353 	REG_WRITE(DIO_MEM_PWR_CTRL, 0);
1354 
1355 	if (!dc->debug.disable_clock_gate) {
1356 		/* enable all DCN clock gating */
1357 		REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
1358 
1359 		REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
1360 
1361 		REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
1362 	}
1363 
1364 	hws->funcs.enable_power_gating_plane(dc->hwseq, true);
1365 
1366 	if (dc->clk_mgr->funcs->notify_wm_ranges)
1367 		dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
1368 
1369 }
1370 
1371 void dcn10_reset_hw_ctx_wrap(
1372 		struct dc *dc,
1373 		struct dc_state *context)
1374 {
1375 	int i;
1376 	struct dce_hwseq *hws = dc->hwseq;
1377 
1378 	/* Reset Back End*/
1379 	for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
1380 		struct pipe_ctx *pipe_ctx_old =
1381 			&dc->current_state->res_ctx.pipe_ctx[i];
1382 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
1383 
1384 		if (!pipe_ctx_old->stream)
1385 			continue;
1386 
1387 		if (pipe_ctx_old->top_pipe)
1388 			continue;
1389 
1390 		if (!pipe_ctx->stream ||
1391 				pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
1392 			struct clock_source *old_clk = pipe_ctx_old->clock_source;
1393 
1394 			dcn10_reset_back_end_for_pipe(dc, pipe_ctx_old, dc->current_state);
1395 			if (hws->funcs.enable_stream_gating)
1396 				hws->funcs.enable_stream_gating(dc, pipe_ctx);
1397 			if (old_clk)
1398 				old_clk->funcs->cs_power_down(old_clk);
1399 		}
1400 	}
1401 }
1402 
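/*
 * For side-by-side / top-and-bottom stereo, the secondary split pipe scans
 * out the right-eye surface: swap the right address in, save the left
 * address in *addr and return true so the caller can restore it afterwards.
 */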
1403 static bool patch_address_for_sbs_tb_stereo(
1404 		struct pipe_ctx *pipe_ctx, PHYSICAL_ADDRESS_LOC *addr)
1405 {
1406 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1407 	bool sec_split = pipe_ctx->top_pipe &&
1408 			pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
1409 	if (sec_split && plane_state->address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
1410 		(pipe_ctx->stream->timing.timing_3d_format ==
1411 		 TIMING_3D_FORMAT_SIDE_BY_SIDE ||
1412 		 pipe_ctx->stream->timing.timing_3d_format ==
1413 		 TIMING_3D_FORMAT_TOP_AND_BOTTOM)) {
1414 		*addr = plane_state->address.grph_stereo.left_addr;
1415 		plane_state->address.grph_stereo.left_addr =
1416 		plane_state->address.grph_stereo.right_addr;
1417 		return true;
1418 	} else {
1419 		if (pipe_ctx->stream->view_format != VIEW_3D_FORMAT_NONE &&
1420 			plane_state->address.type != PLN_ADDR_TYPE_GRPH_STEREO) {
1421 			plane_state->address.type = PLN_ADDR_TYPE_GRPH_STEREO;
1422 			plane_state->address.grph_stereo.right_addr =
1423 			plane_state->address.grph_stereo.left_addr;
1424 		}
1425 	}
1426 	return false;
1427 }
1428 
1429 void dcn10_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx)
1430 {
1431 	bool addr_patched = false;
1432 	PHYSICAL_ADDRESS_LOC addr;
1433 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
1434 
1435 	if (plane_state == NULL)
1436 		return;
1437 
1438 	addr_patched = patch_address_for_sbs_tb_stereo(pipe_ctx, &addr);
1439 
1440 	pipe_ctx->plane_res.hubp->funcs->hubp_program_surface_flip_and_addr(
1441 			pipe_ctx->plane_res.hubp,
1442 			&plane_state->address,
1443 			plane_state->flip_immediate);
1444 
1445 	plane_state->status.requested_address = plane_state->address;
1446 
1447 	if (plane_state->flip_immediate)
1448 		plane_state->status.current_address = plane_state->address;
1449 
1450 	if (addr_patched)
1451 		pipe_ctx->plane_state->address.grph_stereo.left_addr = addr;
1452 }
1453 
1454 bool dcn10_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1455 			const struct dc_plane_state *plane_state)
1456 {
1457 	struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
1458 	const struct dc_transfer_func *tf = NULL;
1459 	bool result = true;
1460 
1461 	if (dpp_base == NULL)
1462 		return false;
1463 
1464 	if (plane_state->in_transfer_func)
1465 		tf = plane_state->in_transfer_func;
1466 
1467 	if (plane_state->gamma_correction &&
1468 		!dpp_base->ctx->dc->debug.always_use_regamma
1469 		&& !plane_state->gamma_correction->is_identity
1470 			&& dce_use_lut(plane_state->format))
1471 		dpp_base->funcs->dpp_program_input_lut(dpp_base, plane_state->gamma_correction);
1472 
1473 	if (tf == NULL)
1474 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1475 	else if (tf->type == TF_TYPE_PREDEFINED) {
1476 		switch (tf->tf) {
1477 		case TRANSFER_FUNCTION_SRGB:
1478 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_sRGB);
1479 			break;
1480 		case TRANSFER_FUNCTION_BT709:
1481 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_HW_xvYCC);
1482 			break;
1483 		case TRANSFER_FUNCTION_LINEAR:
1484 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1485 			break;
1486 		case TRANSFER_FUNCTION_PQ:
1487 			dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_USER_PWL);
1488 			cm_helper_translate_curve_to_degamma_hw_format(tf, &dpp_base->degamma_params);
1489 			dpp_base->funcs->dpp_program_degamma_pwl(dpp_base, &dpp_base->degamma_params);
1490 			result = true;
1491 			break;
1492 		default:
1493 			result = false;
1494 			break;
1495 		}
1496 	} else if (tf->type == TF_TYPE_BYPASS) {
1497 		dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
1498 	} else {
1499 		cm_helper_translate_curve_to_degamma_hw_format(tf,
1500 					&dpp_base->degamma_params);
1501 		dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
1502 				&dpp_base->degamma_params);
1503 		result = true;
1504 	}
1505 
1506 	return result;
1507 }
1508 
1509 #define MAX_NUM_HW_POINTS 0x200
1510 
1511 static void log_tf(struct dc_context *ctx,
1512 				struct dc_transfer_func *tf, uint32_t hw_points_num)
1513 {
1514 	// DC_LOG_GAMMA is default logging of all hw points
1515 	// DC_LOG_ALL_GAMMA logs all points, not only hw points
1516 	// DC_LOG_ALL_TF_POINTS logs all channels of the tf
1517 	int i = 0;
1518 
1519 	DC_LOGGER_INIT(ctx->logger);
1520 	DC_LOG_GAMMA("Gamma Correction TF");
1521 	DC_LOG_ALL_GAMMA("Logging all tf points...");
1522 	DC_LOG_ALL_TF_CHANNELS("Logging all channels...");
1523 
1524 	for (i = 0; i < hw_points_num; i++) {
1525 		DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1526 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1527 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1528 	}
1529 
1530 	for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) {
1531 		DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value);
1532 		DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value);
1533 		DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value);
1534 	}
1535 }
1536 
1537 bool dcn10_set_output_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
1538 				const struct dc_stream_state *stream)
1539 {
1540 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
1541 
1542 	if (dpp == NULL)
1543 		return false;
1544 
1545 	dpp->regamma_params.hw_points_num = GAMMA_HW_POINTS_NUM;
1546 
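	/*
	 * Three paths below: a predefined sRGB output transfer function uses the
	 * hardwired sRGB regamma curve, any other curve is translated into a user
	 * PWL, and if that translation fails regamma is left in bypass.
	 */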
1547 	if (stream->out_transfer_func &&
1548 	    stream->out_transfer_func->type == TF_TYPE_PREDEFINED &&
1549 	    stream->out_transfer_func->tf == TRANSFER_FUNCTION_SRGB)
1550 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_SRGB);
1551 
1552 	/* dcn10_translate_regamma_to_hw_format takes 750us, so only do it on a
1553 	 * full update.
1554 	 */
1555 	else if (cm_helper_translate_curve_to_hw_format(
1556 			stream->out_transfer_func,
1557 			&dpp->regamma_params, false)) {
1558 		dpp->funcs->dpp_program_regamma_pwl(
1559 				dpp,
1560 				&dpp->regamma_params, OPP_REGAMMA_USER);
1561 	} else
1562 		dpp->funcs->dpp_program_regamma_pwl(dpp, NULL, OPP_REGAMMA_BYPASS);
1563 
1564 	if (stream != NULL && stream->ctx != NULL &&
1565 			stream->out_transfer_func != NULL) {
1566 		log_tf(stream->ctx,
1567 				stream->out_transfer_func,
1568 				dpp->regamma_params.hw_points_num);
1569 	}
1570 
1571 	return true;
1572 }
1573 
1574 void dcn10_pipe_control_lock(
1575 	struct dc *dc,
1576 	struct pipe_ctx *pipe,
1577 	bool lock)
1578 {
1579 	struct dce_hwseq *hws = dc->hwseq;
1580 
1581 	/* Use the TG master update lock to lock everything on the TG;
1582 	 * therefore only the top pipe needs to take the lock.
1583 	 */
1584 	if (pipe->top_pipe)
1585 		return;
1586 
1587 	if (dc->debug.sanity_checks)
1588 		hws->funcs.verify_allow_pstate_change_high(dc);
1589 
1590 	if (lock)
1591 		pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
1592 	else
1593 		pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
1594 
1595 	if (dc->debug.sanity_checks)
1596 		hws->funcs.verify_allow_pstate_change_high(dc);
1597 }
1598 
1599 static bool wait_for_reset_trigger_to_occur(
1600 	struct dc_context *dc_ctx,
1601 	struct timing_generator *tg)
1602 {
1603 	bool rc = false;
1604 
1605 	/* To avoid an endless loop we wait at most
1606 	 * frames_to_wait_on_triggered_reset frames for the reset to occur. */
1607 	const uint32_t frames_to_wait_on_triggered_reset = 10;
1608 	int i;
1609 
1610 	for (i = 0; i < frames_to_wait_on_triggered_reset; i++) {
1611 
1612 		if (!tg->funcs->is_counter_moving(tg)) {
1613 			DC_ERROR("TG counter is not moving!\n");
1614 			break;
1615 		}
1616 
1617 		if (tg->funcs->did_triggered_reset_occur(tg)) {
1618 			rc = true;
1619 			/* usually occurs at i=1 */
1620 			DC_SYNC_INFO("GSL: reset occurred at wait count: %d\n",
1621 					i);
1622 			break;
1623 		}
1624 
1625 		/* Wait for one frame. */
1626 		tg->funcs->wait_for_state(tg, CRTC_STATE_VACTIVE);
1627 		tg->funcs->wait_for_state(tg, CRTC_STATE_VBLANK);
1628 	}
1629 
1630 	if (false == rc)
1631 		DC_ERROR("GSL: Timeout on reset trigger!\n");
1632 
1633 	return rc;
1634 }
1635 
1636 void dcn10_enable_timing_synchronization(
1637 	struct dc *dc,
1638 	int group_index,
1639 	int group_size,
1640 	struct pipe_ctx *grouped_pipes[])
1641 {
1642 	struct dc_context *dc_ctx = dc->ctx;
1643 	int i;
1644 
1645 	DC_SYNC_INFO("Setting up OTG reset trigger\n");
1646 
1647 	for (i = 1; i < group_size; i++)
1648 		grouped_pipes[i]->stream_res.tg->funcs->enable_reset_trigger(
1649 				grouped_pipes[i]->stream_res.tg,
1650 				grouped_pipes[0]->stream_res.tg->inst);
1651 
1652 	DC_SYNC_INFO("Waiting for trigger\n");
1653 
1654 	/* Only need to check one pipe for the reset, as all the others are
1655 	 * synchronized. Look at the last pipe programmed to reset.
1656 	 */
1657 
1658 	wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[1]->stream_res.tg);
1659 	for (i = 1; i < group_size; i++)
1660 		grouped_pipes[i]->stream_res.tg->funcs->disable_reset_trigger(
1661 				grouped_pipes[i]->stream_res.tg);
1662 
1663 	DC_SYNC_INFO("Sync complete\n");
1664 }
1665 
1666 void dcn10_enable_per_frame_crtc_position_reset(
1667 	struct dc *dc,
1668 	int group_size,
1669 	struct pipe_ctx *grouped_pipes[])
1670 {
1671 	struct dc_context *dc_ctx = dc->ctx;
1672 	int i;
1673 
1674 	DC_SYNC_INFO("Setting up\n");
1675 	for (i = 0; i < group_size; i++)
1676 		if (grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset)
1677 			grouped_pipes[i]->stream_res.tg->funcs->enable_crtc_reset(
1678 					grouped_pipes[i]->stream_res.tg,
1679 					0,
1680 					&grouped_pipes[i]->stream->triggered_crtc_reset);
1681 
1682 	DC_SYNC_INFO("Waiting for trigger\n");
1683 
1684 	for (i = 0; i < group_size; i++)
1685 		wait_for_reset_trigger_to_occur(dc_ctx, grouped_pipes[i]->stream_res.tg);
1686 
1687 	DC_SYNC_INFO("Multi-display sync is complete\n");
1688 }
1689 
1690 /*static void print_rq_dlg_ttu(
1691 		struct dc *dc,
1692 		struct pipe_ctx *pipe_ctx)
1693 {
1694 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1695 			"\n============== DML TTU Output parameters [%d] ==============\n"
1696 			"qos_level_low_wm: %d, \n"
1697 			"qos_level_high_wm: %d, \n"
1698 			"min_ttu_vblank: %d, \n"
1699 			"qos_level_flip: %d, \n"
1700 			"refcyc_per_req_delivery_l: %d, \n"
1701 			"qos_level_fixed_l: %d, \n"
1702 			"qos_ramp_disable_l: %d, \n"
1703 			"refcyc_per_req_delivery_pre_l: %d, \n"
1704 			"refcyc_per_req_delivery_c: %d, \n"
1705 			"qos_level_fixed_c: %d, \n"
1706 			"qos_ramp_disable_c: %d, \n"
1707 			"refcyc_per_req_delivery_pre_c: %d\n"
1708 			"=============================================================\n",
1709 			pipe_ctx->pipe_idx,
1710 			pipe_ctx->ttu_regs.qos_level_low_wm,
1711 			pipe_ctx->ttu_regs.qos_level_high_wm,
1712 			pipe_ctx->ttu_regs.min_ttu_vblank,
1713 			pipe_ctx->ttu_regs.qos_level_flip,
1714 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_l,
1715 			pipe_ctx->ttu_regs.qos_level_fixed_l,
1716 			pipe_ctx->ttu_regs.qos_ramp_disable_l,
1717 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_l,
1718 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_c,
1719 			pipe_ctx->ttu_regs.qos_level_fixed_c,
1720 			pipe_ctx->ttu_regs.qos_ramp_disable_c,
1721 			pipe_ctx->ttu_regs.refcyc_per_req_delivery_pre_c
1722 			);
1723 
1724 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1725 			"\n============== DML DLG Output parameters [%d] ==============\n"
1726 			"refcyc_h_blank_end: %d, \n"
1727 			"dlg_vblank_end: %d, \n"
1728 			"min_dst_y_next_start: %d, \n"
1729 			"refcyc_per_htotal: %d, \n"
1730 			"refcyc_x_after_scaler: %d, \n"
1731 			"dst_y_after_scaler: %d, \n"
1732 			"dst_y_prefetch: %d, \n"
1733 			"dst_y_per_vm_vblank: %d, \n"
1734 			"dst_y_per_row_vblank: %d, \n"
1735 			"ref_freq_to_pix_freq: %d, \n"
1736 			"vratio_prefetch: %d, \n"
1737 			"refcyc_per_pte_group_vblank_l: %d, \n"
1738 			"refcyc_per_meta_chunk_vblank_l: %d, \n"
1739 			"dst_y_per_pte_row_nom_l: %d, \n"
1740 			"refcyc_per_pte_group_nom_l: %d, \n",
1741 			pipe_ctx->pipe_idx,
1742 			pipe_ctx->dlg_regs.refcyc_h_blank_end,
1743 			pipe_ctx->dlg_regs.dlg_vblank_end,
1744 			pipe_ctx->dlg_regs.min_dst_y_next_start,
1745 			pipe_ctx->dlg_regs.refcyc_per_htotal,
1746 			pipe_ctx->dlg_regs.refcyc_x_after_scaler,
1747 			pipe_ctx->dlg_regs.dst_y_after_scaler,
1748 			pipe_ctx->dlg_regs.dst_y_prefetch,
1749 			pipe_ctx->dlg_regs.dst_y_per_vm_vblank,
1750 			pipe_ctx->dlg_regs.dst_y_per_row_vblank,
1751 			pipe_ctx->dlg_regs.ref_freq_to_pix_freq,
1752 			pipe_ctx->dlg_regs.vratio_prefetch,
1753 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_l,
1754 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_l,
1755 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_l,
1756 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_l
1757 			);
1758 
1759 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1760 			"\ndst_y_per_meta_row_nom_l: %d, \n"
1761 			"refcyc_per_meta_chunk_nom_l: %d, \n"
1762 			"refcyc_per_line_delivery_pre_l: %d, \n"
1763 			"refcyc_per_line_delivery_l: %d, \n"
1764 			"vratio_prefetch_c: %d, \n"
1765 			"refcyc_per_pte_group_vblank_c: %d, \n"
1766 			"refcyc_per_meta_chunk_vblank_c: %d, \n"
1767 			"dst_y_per_pte_row_nom_c: %d, \n"
1768 			"refcyc_per_pte_group_nom_c: %d, \n"
1769 			"dst_y_per_meta_row_nom_c: %d, \n"
1770 			"refcyc_per_meta_chunk_nom_c: %d, \n"
1771 			"refcyc_per_line_delivery_pre_c: %d, \n"
1772 			"refcyc_per_line_delivery_c: %d \n"
1773 			"========================================================\n",
1774 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_l,
1775 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_l,
1776 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_l,
1777 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_l,
1778 			pipe_ctx->dlg_regs.vratio_prefetch_c,
1779 			pipe_ctx->dlg_regs.refcyc_per_pte_group_vblank_c,
1780 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_vblank_c,
1781 			pipe_ctx->dlg_regs.dst_y_per_pte_row_nom_c,
1782 			pipe_ctx->dlg_regs.refcyc_per_pte_group_nom_c,
1783 			pipe_ctx->dlg_regs.dst_y_per_meta_row_nom_c,
1784 			pipe_ctx->dlg_regs.refcyc_per_meta_chunk_nom_c,
1785 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_pre_c,
1786 			pipe_ctx->dlg_regs.refcyc_per_line_delivery_c
1787 			);
1788 
1789 	DC_LOG_BANDWIDTH_CALCS(dc->ctx->logger,
1790 			"\n============== DML RQ Output parameters [%d] ==============\n"
1791 			"chunk_size: %d \n"
1792 			"min_chunk_size: %d \n"
1793 			"meta_chunk_size: %d \n"
1794 			"min_meta_chunk_size: %d \n"
1795 			"dpte_group_size: %d \n"
1796 			"mpte_group_size: %d \n"
1797 			"swath_height: %d \n"
1798 			"pte_row_height_linear: %d \n"
1799 			"========================================================\n",
1800 			pipe_ctx->pipe_idx,
1801 			pipe_ctx->rq_regs.rq_regs_l.chunk_size,
1802 			pipe_ctx->rq_regs.rq_regs_l.min_chunk_size,
1803 			pipe_ctx->rq_regs.rq_regs_l.meta_chunk_size,
1804 			pipe_ctx->rq_regs.rq_regs_l.min_meta_chunk_size,
1805 			pipe_ctx->rq_regs.rq_regs_l.dpte_group_size,
1806 			pipe_ctx->rq_regs.rq_regs_l.mpte_group_size,
1807 			pipe_ctx->rq_regs.rq_regs_l.swath_height,
1808 			pipe_ctx->rq_regs.rq_regs_l.pte_row_height_linear
1809 			);
1810 }
1811 */
1812 
1813 static void mmhub_read_vm_system_aperture_settings(struct dcn10_hubp *hubp1,
1814 		struct vm_system_aperture_param *apt,
1815 		struct dce_hwseq *hws)
1816 {
1817 	PHYSICAL_ADDRESS_LOC physical_page_number;
1818 	uint32_t logical_addr_low;
1819 	uint32_t logical_addr_high;
1820 
1821 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
1822 			PHYSICAL_PAGE_NUMBER_MSB, &physical_page_number.high_part);
1823 	REG_GET(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
1824 			PHYSICAL_PAGE_NUMBER_LSB, &physical_page_number.low_part);
1825 
1826 	REG_GET(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
1827 			LOGICAL_ADDR, &logical_addr_low);
1828 
1829 	REG_GET(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1830 			LOGICAL_ADDR, &logical_addr_high);
1831 
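	/*
	 * Convert the register fields to byte addresses: the default physical
	 * page number is in 4 KiB pages (hence the shift by 12), while the
	 * logical low/high aperture addresses appear to be kept in 256 KiB
	 * units (hence the shift by 18).
	 */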
1832 	apt->sys_default.quad_part =  physical_page_number.quad_part << 12;
1833 	apt->sys_low.quad_part =  (int64_t)logical_addr_low << 18;
1834 	apt->sys_high.quad_part =  (int64_t)logical_addr_high << 18;
1835 }
1836 
1837 /* Temporarily read settings; in the future the values will come from the kmd directly */
1838 static void mmhub_read_vm_context0_settings(struct dcn10_hubp *hubp1,
1839 		struct vm_context0_param *vm0,
1840 		struct dce_hwseq *hws)
1841 {
1842 	PHYSICAL_ADDRESS_LOC fb_base;
1843 	PHYSICAL_ADDRESS_LOC fb_offset;
1844 	uint32_t fb_base_value;
1845 	uint32_t fb_offset_value;
1846 
1847 	REG_GET(DCHUBBUB_SDPIF_FB_BASE, SDPIF_FB_BASE, &fb_base_value);
1848 	REG_GET(DCHUBBUB_SDPIF_FB_OFFSET, SDPIF_FB_OFFSET, &fb_offset_value);
1849 
1850 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
1851 			PAGE_DIRECTORY_ENTRY_HI32, &vm0->pte_base.high_part);
1852 	REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
1853 			PAGE_DIRECTORY_ENTRY_LO32, &vm0->pte_base.low_part);
1854 
1855 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
1856 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_start.high_part);
1857 	REG_GET(VM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
1858 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_start.low_part);
1859 
1860 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
1861 			LOGICAL_PAGE_NUMBER_HI4, &vm0->pte_end.high_part);
1862 	REG_GET(VM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
1863 			LOGICAL_PAGE_NUMBER_LO32, &vm0->pte_end.low_part);
1864 
1865 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
1866 			PHYSICAL_PAGE_ADDR_HI4, &vm0->fault_default.high_part);
1867 	REG_GET(VM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
1868 			PHYSICAL_PAGE_ADDR_LO32, &vm0->fault_default.low_part);
1869 
1870 	/*
1871 	 * The value in VM_CONTEXT0_PAGE_TABLE_BASE_ADDR is in UMA space.
1872 	 * Therefore we need to do
1873 	 * DCN_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR
1874 	 * - DCHUBBUB_SDPIF_FB_OFFSET + DCHUBBUB_SDPIF_FB_BASE
1875 	 */
1876 	fb_base.quad_part = (uint64_t)fb_base_value << 24;
1877 	fb_offset.quad_part = (uint64_t)fb_offset_value << 24;
1878 	vm0->pte_base.quad_part += fb_base.quad_part;
1879 	vm0->pte_base.quad_part -= fb_offset.quad_part;
1880 }
1881 
1882 
1883 void dcn10_program_pte_vm(struct dce_hwseq *hws, struct hubp *hubp)
1884 {
1885 	struct dcn10_hubp *hubp1 = TO_DCN10_HUBP(hubp);
1886 	struct vm_system_aperture_param apt = { {{ 0 } } };
1887 	struct vm_context0_param vm0 = { { { 0 } } };
1888 
1889 	mmhub_read_vm_system_aperture_settings(hubp1, &apt, hws);
1890 	mmhub_read_vm_context0_settings(hubp1, &vm0, hws);
1891 
1892 	hubp->funcs->hubp_set_vm_system_aperture_settings(hubp, &apt);
1893 	hubp->funcs->hubp_set_vm_context0_settings(hubp, &vm0);
1894 }
1895 
1896 static void dcn10_enable_plane(
1897 	struct dc *dc,
1898 	struct pipe_ctx *pipe_ctx,
1899 	struct dc_state *context)
1900 {
1901 	struct dce_hwseq *hws = dc->hwseq;
1902 
1903 	if (dc->debug.sanity_checks) {
1904 		hws->funcs.verify_allow_pstate_change_high(dc);
1905 	}
1906 
1907 	undo_DEGVIDCN10_253_wa(dc);
1908 
1909 	power_on_plane(dc->hwseq,
1910 		pipe_ctx->plane_res.hubp->inst);
1911 
1912 	/* enable DCFCLK for the current DCHUB */
1913 	pipe_ctx->plane_res.hubp->funcs->hubp_clk_cntl(pipe_ctx->plane_res.hubp, true);
1914 
1915 	/* make sure OPP_PIPE_CLOCK_EN = 1 */
1916 	pipe_ctx->stream_res.opp->funcs->opp_pipe_clock_control(
1917 			pipe_ctx->stream_res.opp,
1918 			true);
1919 
1920 /* TODO: enable/disable in dm as per update type.
1921 	if (plane_state) {
1922 		DC_LOG_DC(dc->ctx->logger,
1923 				"Pipe:%d 0x%x: addr hi:0x%x, "
1924 				"addr low:0x%x, "
1925 				"src: %d, %d, %d,"
1926 				" %d; dst: %d, %d, %d, %d;\n",
1927 				pipe_ctx->pipe_idx,
1928 				plane_state,
1929 				plane_state->address.grph.addr.high_part,
1930 				plane_state->address.grph.addr.low_part,
1931 				plane_state->src_rect.x,
1932 				plane_state->src_rect.y,
1933 				plane_state->src_rect.width,
1934 				plane_state->src_rect.height,
1935 				plane_state->dst_rect.x,
1936 				plane_state->dst_rect.y,
1937 				plane_state->dst_rect.width,
1938 				plane_state->dst_rect.height);
1939 
1940 		DC_LOG_DC(dc->ctx->logger,
1941 				"Pipe %d: width, height, x, y         format:%d\n"
1942 				"viewport:%d, %d, %d, %d\n"
1943 				"recout:  %d, %d, %d, %d\n",
1944 				pipe_ctx->pipe_idx,
1945 				plane_state->format,
1946 				pipe_ctx->plane_res.scl_data.viewport.width,
1947 				pipe_ctx->plane_res.scl_data.viewport.height,
1948 				pipe_ctx->plane_res.scl_data.viewport.x,
1949 				pipe_ctx->plane_res.scl_data.viewport.y,
1950 				pipe_ctx->plane_res.scl_data.recout.width,
1951 				pipe_ctx->plane_res.scl_data.recout.height,
1952 				pipe_ctx->plane_res.scl_data.recout.x,
1953 				pipe_ctx->plane_res.scl_data.recout.y);
1954 		print_rq_dlg_ttu(dc, pipe_ctx);
1955 	}
1956 */
1957 	if (dc->config.gpu_vm_support)
1958 		dcn10_program_pte_vm(hws, pipe_ctx->plane_res.hubp);
1959 
1960 	if (dc->debug.sanity_checks) {
1961 		hws->funcs.verify_allow_pstate_change_high(dc);
1962 	}
1963 }
1964 
1965 void dcn10_program_gamut_remap(struct pipe_ctx *pipe_ctx)
1966 {
1967 	int i = 0;
1968 	struct dpp_grph_csc_adjustment adjust;
1969 	memset(&adjust, 0, sizeof(adjust));
1970 	adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
1971 
1972 
1973 	if (pipe_ctx->stream->gamut_remap_matrix.enable_remap == true) {
1974 		adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
1975 		for (i = 0; i < CSC_TEMPERATURE_MATRIX_SIZE; i++)
1976 			adjust.temperature_matrix[i] =
1977 				pipe_ctx->stream->gamut_remap_matrix.matrix[i];
1978 	}
1979 
1980 	pipe_ctx->plane_res.dpp->funcs->dpp_set_gamut_remap(pipe_ctx->plane_res.dpp, &adjust);
1981 }
1982 
1983 
1984 static bool dcn10_is_rear_mpo_fix_required(struct pipe_ctx *pipe_ctx, enum dc_color_space colorspace)
1985 {
1986 	if (pipe_ctx->plane_state && pipe_ctx->plane_state->layer_index > 0 && is_rgb_cspace(colorspace)) {
1987 		if (pipe_ctx->top_pipe) {
1988 			struct pipe_ctx *top = pipe_ctx->top_pipe;
1989 
1990 			while (top->top_pipe)
1991 				top = top->top_pipe; // Traverse to top pipe_ctx
1992 			if (top->plane_state && top->plane_state->layer_index == 0)
1993 				return true; // Front MPO plane not hidden
1994 		}
1995 	}
1996 	return false;
1997 }
1998 
1999 static void dcn10_set_csc_adjustment_rgb_mpo_fix(struct pipe_ctx *pipe_ctx, uint16_t *matrix)
2000 {
2001 	// Override rear plane RGB bias to fix MPO brightness
2002 	uint16_t rgb_bias = matrix[3];
2003 
2004 	matrix[3] = 0;
2005 	matrix[7] = 0;
2006 	matrix[11] = 0;
2007 	pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2008 	matrix[3] = rgb_bias;
2009 	matrix[7] = rgb_bias;
2010 	matrix[11] = rgb_bias;
2011 }
2012 
2013 void dcn10_program_output_csc(struct dc *dc,
2014 		struct pipe_ctx *pipe_ctx,
2015 		enum dc_color_space colorspace,
2016 		uint16_t *matrix,
2017 		int opp_id)
2018 {
2019 	if (pipe_ctx->stream->csc_color_matrix.enable_adjustment == true) {
2020 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment != NULL) {
2021 
2022 			/* MPO is broken with RGB colorspaces when OCSC matrix
2023 			 * brightness offset >= 0 on DCN1 due to OCSC before MPC
2024 			 * Blending adds offsets from front + rear to rear plane
2025 			 *
2026 			 * Fix is to set RGB bias to 0 on rear plane, top plane
2027 			 * black value pixels add offset instead of rear + front
2028 			 */
2029 
2030 			int16_t rgb_bias = matrix[3];
2031 			// matrix[3/7/11] are all the same offset value
2032 
2033 			if (rgb_bias > 0 && dcn10_is_rear_mpo_fix_required(pipe_ctx, colorspace)) {
2034 				dcn10_set_csc_adjustment_rgb_mpo_fix(pipe_ctx, matrix);
2035 			} else {
2036 				pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_adjustment(pipe_ctx->plane_res.dpp, matrix);
2037 			}
2038 		}
2039 	} else {
2040 		if (pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default != NULL)
2041 			pipe_ctx->plane_res.dpp->funcs->dpp_set_csc_default(pipe_ctx->plane_res.dpp, colorspace);
2042 	}
2043 }
2044 
2045 void dcn10_get_surface_visual_confirm_color(
2046 		const struct pipe_ctx *pipe_ctx,
2047 		struct tg_color *color)
2048 {
2049 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2050 
2051 	switch (pipe_ctx->plane_res.scl_data.format) {
2052 	case PIXEL_FORMAT_ARGB8888:
2053 		/* set border color to red */
2054 		color->color_r_cr = color_value;
2055 		break;
2056 
2057 	case PIXEL_FORMAT_ARGB2101010:
2058 		/* set border color to blue */
2059 		color->color_b_cb = color_value;
2060 		break;
2061 	case PIXEL_FORMAT_420BPP8:
2062 		/* set border color to green */
2063 		color->color_g_y = color_value;
2064 		break;
2065 	case PIXEL_FORMAT_420BPP10:
2066 		/* set border color to yellow */
2067 		color->color_g_y = color_value;
2068 		color->color_r_cr = color_value;
2069 		break;
2070 	case PIXEL_FORMAT_FP16:
2071 		/* set border color to white */
2072 		color->color_r_cr = color_value;
2073 		color->color_b_cb = color_value;
2074 		color->color_g_y = color_value;
2075 		break;
2076 	default:
2077 		break;
2078 	}
2079 }
2080 
2081 void dcn10_get_hdr_visual_confirm_color(
2082 		struct pipe_ctx *pipe_ctx,
2083 		struct tg_color *color)
2084 {
2085 	uint32_t color_value = MAX_TG_COLOR_VALUE;
2086 
2087 	// Determine the overscan color based on the top-most (desktop) plane's context
2088 	struct pipe_ctx *top_pipe_ctx  = pipe_ctx;
2089 
2090 	while (top_pipe_ctx->top_pipe != NULL)
2091 		top_pipe_ctx = top_pipe_ctx->top_pipe;
2092 
2093 	switch (top_pipe_ctx->plane_res.scl_data.format) {
2094 	case PIXEL_FORMAT_ARGB2101010:
2095 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2096 			/* HDR10, ARGB2101010 - set border color to red */
2097 			color->color_r_cr = color_value;
2098 		}
2099 		break;
2100 	case PIXEL_FORMAT_FP16:
2101 		if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
2102 			/* HDR10, FP16 - set border color to blue */
2103 			color->color_b_cb = color_value;
2104 		} else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
2105 			/* FreeSync 2 HDR - set border color to green */
2106 			color->color_g_y = color_value;
2107 		}
2108 		break;
2109 	default:
2110 		/* SDR - set border color to gray */
2111 		color->color_r_cr = color_value/2;
2112 		color->color_b_cb = color_value/2;
2113 		color->color_g_y = color_value/2;
2114 		break;
2115 	}
2116 }
2117 
2118 static void dcn10_update_dpp(struct dpp *dpp, struct dc_plane_state *plane_state)
2119 {
2120 	struct dc_bias_and_scale bns_params = {0};
2121 
2122 	// program the input csc
2123 	dpp->funcs->dpp_setup(dpp,
2124 			plane_state->format,
2125 			EXPANSION_MODE_ZERO,
2126 			plane_state->input_csc_color_matrix,
2127 			plane_state->color_space,
2128 			NULL);
2129 
2130 	//set scale and bias registers
2131 	build_prescale_params(&bns_params, plane_state);
2132 	if (dpp->funcs->dpp_program_bias_and_scale)
2133 		dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params);
2134 }
2135 
2136 void dcn10_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx)
2137 {
2138 	struct dce_hwseq *hws = dc->hwseq;
2139 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2140 	struct mpcc_blnd_cfg blnd_cfg = {{0}};
2141 	bool per_pixel_alpha = pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2142 	int mpcc_id;
2143 	struct mpcc *new_mpcc;
2144 	struct mpc *mpc = dc->res_pool->mpc;
2145 	struct mpc_tree *mpc_tree_params = &(pipe_ctx->stream_res.opp->mpc_tree_params);
2146 
2147 	if (dc->debug.visual_confirm == VISUAL_CONFIRM_HDR) {
2148 		hws->funcs.get_hdr_visual_confirm_color(
2149 				pipe_ctx, &blnd_cfg.black_color);
2150 	} else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) {
2151 		hws->funcs.get_surface_visual_confirm_color(
2152 				pipe_ctx, &blnd_cfg.black_color);
2153 	} else {
2154 		color_space_to_black_color(
2155 				dc, pipe_ctx->stream->output_color_space,
2156 				&blnd_cfg.black_color);
2157 	}
2158 
2159 	if (per_pixel_alpha)
2160 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_PER_PIXEL_ALPHA;
2161 	else
2162 		blnd_cfg.alpha_mode = MPCC_ALPHA_BLEND_MODE_GLOBAL_ALPHA;
2163 
2164 	blnd_cfg.overlap_only = false;
2165 	blnd_cfg.global_gain = 0xff;
2166 
2167 	if (pipe_ctx->plane_state->global_alpha)
2168 		blnd_cfg.global_alpha = pipe_ctx->plane_state->global_alpha_value;
2169 	else
2170 		blnd_cfg.global_alpha = 0xff;
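	/* 0xff is the maximum of these 8-bit gain/alpha fields, i.e. fully opaque. */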
2171 
2172 	/* DCN1.0 has output CM before MPC which seems to screw with
2173 	 * pre-multiplied alpha.
2174 	 */
2175 	blnd_cfg.pre_multiplied_alpha = is_rgb_cspace(
2176 			pipe_ctx->stream->output_color_space)
2177 					&& per_pixel_alpha;
2178 
2179 
2180 	/*
2181 	 * TODO: remove hack
2182 	 * Note: currently there is a bug in init_hw such that
2183 	 * on resume from hibernate, BIOS sets up MPCC0, and
2184 	 * we do mpcc_remove but the mpcc cannot go to idle
2185 	 * after remove. This causes us to pick mpcc1 here,
2186 	 * which causes a pstate hang for an as yet unknown reason.
2187 	 */
2188 	mpcc_id = hubp->inst;
2189 
2190 	/* If there is no full update, we don't need to touch the MPC tree */
2191 	if (!pipe_ctx->plane_state->update_flags.bits.full_update) {
2192 		mpc->funcs->update_blending(mpc, &blnd_cfg, mpcc_id);
2193 		return;
2194 	}
2195 
2196 	/* check if this MPCC is already being used */
2197 	new_mpcc = mpc->funcs->get_mpcc_for_dpp(mpc_tree_params, mpcc_id);
2198 	/* remove MPCC if being used */
2199 	if (new_mpcc != NULL)
2200 		mpc->funcs->remove_mpcc(mpc, mpc_tree_params, new_mpcc);
2201 	else
2202 		if (dc->debug.sanity_checks)
2203 			mpc->funcs->assert_mpcc_idle_before_connect(
2204 					dc->res_pool->mpc, mpcc_id);
2205 
2206 	/* Call MPC to insert new plane */
2207 	new_mpcc = mpc->funcs->insert_plane(dc->res_pool->mpc,
2208 			mpc_tree_params,
2209 			&blnd_cfg,
2210 			NULL,
2211 			NULL,
2212 			hubp->inst,
2213 			mpcc_id);
2214 
2215 	ASSERT(new_mpcc != NULL);
2216 
2217 	hubp->opp_id = pipe_ctx->stream_res.opp->inst;
2218 	hubp->mpcc_id = mpcc_id;
2219 }
2220 
2221 static void update_scaler(struct pipe_ctx *pipe_ctx)
2222 {
2223 	bool per_pixel_alpha =
2224 			pipe_ctx->plane_state->per_pixel_alpha && pipe_ctx->bottom_pipe;
2225 
2226 	pipe_ctx->plane_res.scl_data.lb_params.alpha_en = per_pixel_alpha;
2227 	pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
2228 	/* scaler configuration */
2229 	pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler(
2230 			pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data);
2231 }
2232 
2233 static void dcn10_update_dchubp_dpp(
2234 	struct dc *dc,
2235 	struct pipe_ctx *pipe_ctx,
2236 	struct dc_state *context)
2237 {
2238 	struct dce_hwseq *hws = dc->hwseq;
2239 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2240 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2241 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2242 	struct plane_size size = plane_state->plane_size;
2243 	unsigned int compat_level = 0;
2244 
2245 	/* The DPP clock value depends on the DML calculation and may change dynamically. */
2246 	/* If the requested max DPP clock is lower than the current dispclk, there is
2247 	 * no need to divide by 2.
2248 	 */
2249 	if (plane_state->update_flags.bits.full_update) {
2250 		bool should_divided_by_2 = context->bw_ctx.bw.dcn.clk.dppclk_khz <=
2251 				dc->clk_mgr->clks.dispclk_khz / 2;
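		/*
		 * DPPCLK is driven at either DISPCLK or DISPCLK/2; the divided
		 * clock is requested when the DML-computed DPP clock fits within
		 * half of DISPCLK (see the flag above and the control call below).
		 */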
2252 
2253 		dpp->funcs->dpp_dppclk_control(
2254 				dpp,
2255 				should_divided_by_2,
2256 				true);
2257 
2258 		if (dc->res_pool->dccg)
2259 			dc->res_pool->dccg->funcs->update_dpp_dto(
2260 					dc->res_pool->dccg,
2261 					dpp->inst,
2262 					pipe_ctx->plane_res.bw.dppclk_khz);
2263 		else
2264 			dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ?
2265 						dc->clk_mgr->clks.dispclk_khz / 2 :
2266 							dc->clk_mgr->clks.dispclk_khz;
2267 	}
2268 
2269 	/* TODO: Need an input parameter to tell which OTG the current DCHUB pipe is tied to.
2270 	 * VTG is within DCHUBBUB, which is a common block shared by each pipe HUBP.
2271 	 * VTG has a 1:1 mapping with OTG. Each pipe HUBP will select which VTG to use.
2272 	 */
2273 	if (plane_state->update_flags.bits.full_update) {
2274 		hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst);
2275 
2276 		hubp->funcs->hubp_setup(
2277 			hubp,
2278 			&pipe_ctx->dlg_regs,
2279 			&pipe_ctx->ttu_regs,
2280 			&pipe_ctx->rq_regs,
2281 			&pipe_ctx->pipe_dlg_param);
2282 		hubp->funcs->hubp_setup_interdependent(
2283 			hubp,
2284 			&pipe_ctx->dlg_regs,
2285 			&pipe_ctx->ttu_regs);
2286 	}
2287 
2288 	size.surface_size = pipe_ctx->plane_res.scl_data.viewport;
2289 
2290 	if (plane_state->update_flags.bits.full_update ||
2291 		plane_state->update_flags.bits.bpp_change)
2292 		dcn10_update_dpp(dpp, plane_state);
2293 
2294 	if (plane_state->update_flags.bits.full_update ||
2295 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2296 		plane_state->update_flags.bits.global_alpha_change)
2297 		hws->funcs.update_mpcc(dc, pipe_ctx);
2298 
2299 	if (plane_state->update_flags.bits.full_update ||
2300 		plane_state->update_flags.bits.per_pixel_alpha_change ||
2301 		plane_state->update_flags.bits.global_alpha_change ||
2302 		plane_state->update_flags.bits.scaling_change ||
2303 		plane_state->update_flags.bits.position_change) {
2304 		update_scaler(pipe_ctx);
2305 	}
2306 
2307 	if (plane_state->update_flags.bits.full_update ||
2308 		plane_state->update_flags.bits.scaling_change ||
2309 		plane_state->update_flags.bits.position_change) {
2310 		hubp->funcs->mem_program_viewport(
2311 			hubp,
2312 			&pipe_ctx->plane_res.scl_data.viewport,
2313 			&pipe_ctx->plane_res.scl_data.viewport_c);
2314 	}
2315 
2316 	if (pipe_ctx->stream->cursor_attributes.address.quad_part != 0) {
2317 		dc->hwss.set_cursor_position(pipe_ctx);
2318 		dc->hwss.set_cursor_attribute(pipe_ctx);
2319 
2320 		if (dc->hwss.set_cursor_sdr_white_level)
2321 			dc->hwss.set_cursor_sdr_white_level(pipe_ctx);
2322 	}
2323 
2324 	if (plane_state->update_flags.bits.full_update) {
2325 		/*gamut remap*/
2326 		dc->hwss.program_gamut_remap(pipe_ctx);
2327 
2328 		dc->hwss.program_output_csc(dc,
2329 				pipe_ctx,
2330 				pipe_ctx->stream->output_color_space,
2331 				pipe_ctx->stream->csc_color_matrix.matrix,
2332 				pipe_ctx->stream_res.opp->inst);
2333 	}
2334 
2335 	if (plane_state->update_flags.bits.full_update ||
2336 		plane_state->update_flags.bits.pixel_format_change ||
2337 		plane_state->update_flags.bits.horizontal_mirror_change ||
2338 		plane_state->update_flags.bits.rotation_change ||
2339 		plane_state->update_flags.bits.swizzle_change ||
2340 		plane_state->update_flags.bits.dcc_change ||
2341 		plane_state->update_flags.bits.bpp_change ||
2342 		plane_state->update_flags.bits.scaling_change ||
2343 		plane_state->update_flags.bits.plane_size_change) {
2344 		hubp->funcs->hubp_program_surface_config(
2345 			hubp,
2346 			plane_state->format,
2347 			&plane_state->tiling_info,
2348 			&size,
2349 			plane_state->rotation,
2350 			&plane_state->dcc,
2351 			plane_state->horizontal_mirror,
2352 			compat_level);
2353 	}
2354 
2355 	hubp->power_gated = false;
2356 
2357 	hws->funcs.update_plane_addr(dc, pipe_ctx);
2358 
2359 	if (is_pipe_tree_visible(pipe_ctx))
2360 		hubp->funcs->set_blank(hubp, false);
2361 }
2362 
2363 void dcn10_blank_pixel_data(
2364 		struct dc *dc,
2365 		struct pipe_ctx *pipe_ctx,
2366 		bool blank)
2367 {
2368 	enum dc_color_space color_space;
2369 	struct tg_color black_color = {0};
2370 	struct stream_resource *stream_res = &pipe_ctx->stream_res;
2371 	struct dc_stream_state *stream = pipe_ctx->stream;
2372 
2373 	/* program otg blank color */
2374 	color_space = stream->output_color_space;
2375 	color_space_to_black_color(dc, color_space, &black_color);
2376 
2377 	/*
2378 	 * The way 420 is packed, 2 channels carry the Y component and 1 channel
2379 	 * alternates between Cb and Cr, so both channels need the pixel
2380 	 * value for Y.
2381 	 */
2382 	if (stream->timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
2383 		black_color.color_r_cr = black_color.color_g_y;
2384 
2385 
2386 	if (stream_res->tg->funcs->set_blank_color)
2387 		stream_res->tg->funcs->set_blank_color(
2388 				stream_res->tg,
2389 				&black_color);
2390 
2391 	if (!blank) {
2392 		if (stream_res->tg->funcs->set_blank)
2393 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2394 		if (stream_res->abm) {
2395 			stream_res->abm->funcs->set_pipe(stream_res->abm, stream_res->tg->inst + 1);
2396 			stream_res->abm->funcs->set_abm_level(stream_res->abm, stream->abm_level);
2397 		}
2398 	} else if (blank) {
2399 		if (stream_res->abm)
2400 			stream_res->abm->funcs->set_abm_immediate_disable(stream_res->abm);
2401 		if (stream_res->tg->funcs->set_blank)
2402 			stream_res->tg->funcs->set_blank(stream_res->tg, blank);
2403 	}
2404 }
2405 
2406 void dcn10_set_hdr_multiplier(struct pipe_ctx *pipe_ctx)
2407 {
2408 	struct fixed31_32 multiplier = pipe_ctx->plane_state->hdr_mult;
2409 	uint32_t hw_mult = 0x1f000; // 1.0 default multiplier
2410 	struct custom_float_format fmt;
2411 
2412 	fmt.exponenta_bits = 6;
2413 	fmt.mantissa_bits = 12;
2414 	fmt.sign = true;
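	/*
	 * With a 6-bit exponent (bias 31) and 12-bit mantissa, 1.0 is encoded as
	 * exponent == bias with a zero mantissa, i.e. 31 << 12 = 0x1f000,
	 * presumably why that value is used as the default multiplier above.
	 */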
2415 
2416 
2417 	if (!dc_fixpt_eq(multiplier, dc_fixpt_from_int(0))) // check != 0
2418 		convert_to_custom_float_format(multiplier, &fmt, &hw_mult);
2419 
2420 	pipe_ctx->plane_res.dpp->funcs->dpp_set_hdr_multiplier(
2421 			pipe_ctx->plane_res.dpp, hw_mult);
2422 }
2423 
2424 void dcn10_program_pipe(
2425 		struct dc *dc,
2426 		struct pipe_ctx *pipe_ctx,
2427 		struct dc_state *context)
2428 {
2429 	struct dce_hwseq *hws = dc->hwseq;
2430 
2431 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2432 		dcn10_enable_plane(dc, pipe_ctx, context);
2433 
2434 	dcn10_update_dchubp_dpp(dc, pipe_ctx, context);
2435 
2436 	hws->funcs.set_hdr_multiplier(pipe_ctx);
2437 
2438 	if (pipe_ctx->plane_state->update_flags.bits.full_update ||
2439 			pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
2440 			pipe_ctx->plane_state->update_flags.bits.gamma_change)
2441 		hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
2442 
2443 	/* dcn10_translate_regamma_to_hw_format takes 750us to finish,
2444 	 * so only do gamma programming on a full update.
2445 	 * TODO: This can be further optimized/cleaned up.
2446 	 * Always call this for now, since it does a memcmp inside before
2447 	 * doing the heavy calculation and programming.
2448 	 */
2449 	if (pipe_ctx->plane_state->update_flags.bits.full_update)
2450 		hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
2451 }
2452 
2453 static void dcn10_program_all_pipe_in_tree(
2454 		struct dc *dc,
2455 		struct pipe_ctx *pipe_ctx,
2456 		struct dc_state *context)
2457 {
2458 	struct dce_hwseq *hws = dc->hwseq;
2459 
2460 	if (pipe_ctx->top_pipe == NULL) {
2461 		bool blank = !is_pipe_tree_visible(pipe_ctx);
2462 
2463 		pipe_ctx->stream_res.tg->funcs->program_global_sync(
2464 				pipe_ctx->stream_res.tg,
2465 				pipe_ctx->pipe_dlg_param.vready_offset,
2466 				pipe_ctx->pipe_dlg_param.vstartup_start,
2467 				pipe_ctx->pipe_dlg_param.vupdate_offset,
2468 				pipe_ctx->pipe_dlg_param.vupdate_width);
2469 
2470 		pipe_ctx->stream_res.tg->funcs->set_vtg_params(
2471 				pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
2472 
2473 		if (hws->funcs.setup_vupdate_interrupt)
2474 			hws->funcs.setup_vupdate_interrupt(dc, pipe_ctx);
2475 
2476 		hws->funcs.blank_pixel_data(dc, pipe_ctx, blank);
2477 	}
2478 
2479 	if (pipe_ctx->plane_state != NULL)
2480 		hws->funcs.program_pipe(dc, pipe_ctx, context);
2481 
2482 	if (pipe_ctx->bottom_pipe != NULL && pipe_ctx->bottom_pipe != pipe_ctx)
2483 		dcn10_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context);
2484 }
2485 
2486 static struct pipe_ctx *dcn10_find_top_pipe_for_stream(
2487 		struct dc *dc,
2488 		struct dc_state *context,
2489 		const struct dc_stream_state *stream)
2490 {
2491 	int i;
2492 
2493 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2494 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2495 		struct pipe_ctx *old_pipe_ctx =
2496 				&dc->current_state->res_ctx.pipe_ctx[i];
2497 
2498 		if (!pipe_ctx->plane_state && !old_pipe_ctx->plane_state)
2499 			continue;
2500 
2501 		if (pipe_ctx->stream != stream)
2502 			continue;
2503 
2504 		if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe)
2505 			return pipe_ctx;
2506 	}
2507 	return NULL;
2508 }
2509 
2510 void dcn10_apply_ctx_for_surface(
2511 		struct dc *dc,
2512 		const struct dc_stream_state *stream,
2513 		int num_planes,
2514 		struct dc_state *context)
2515 {
2516 	struct dce_hwseq *hws = dc->hwseq;
2517 	int i;
2518 	struct timing_generator *tg;
2519 	uint32_t underflow_check_delay_us;
2520 	bool removed_pipe[4] = { false };
2521 	bool interdependent_update = false;
2522 	struct pipe_ctx *top_pipe_to_program =
2523 			dcn10_find_top_pipe_for_stream(dc, context, stream);
2524 	DC_LOGGER_INIT(dc->ctx->logger);
2525 
2526 	if (!top_pipe_to_program)
2527 		return;
2528 
2529 	tg = top_pipe_to_program->stream_res.tg;
2530 
2531 	interdependent_update = top_pipe_to_program->plane_state &&
2532 		top_pipe_to_program->plane_state->update_flags.bits.full_update;
2533 
2534 	underflow_check_delay_us = dc->debug.underflow_assert_delay_us;
2535 
2536 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2537 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2538 
2539 	if (interdependent_update)
2540 		dcn10_lock_all_pipes(dc, context, true);
2541 	else
2542 		dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
2543 
2544 	if (underflow_check_delay_us != 0xFFFFFFFF)
2545 		udelay(underflow_check_delay_us);
2546 
2547 	if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
2548 		ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
2549 
2550 	if (num_planes == 0) {
2551 		/* OTG blank before removing all front ends */
2552 		hws->funcs.blank_pixel_data(dc, top_pipe_to_program, true);
2553 	}
2554 
2555 	/* Disconnect unused mpcc */
2556 	for (i = 0; i < dc->res_pool->pipe_count; i++) {
2557 		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2558 		struct pipe_ctx *old_pipe_ctx =
2559 				&dc->current_state->res_ctx.pipe_ctx[i];
2560 		/*
2561 		 * Powergate reused pipes that are not powergated.
2562 		 * Fairly hacky right now, using opp_id as the indicator.
2563 		 * TODO: After moving dc_post to dc_update, this will
2564 		 * be removed.
2565 		 */
2566 		if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
2567 			if (old_pipe_ctx->stream_res.tg == tg &&
2568 			    old_pipe_ctx->plane_res.hubp &&
2569 			    old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
2570 				dc->hwss.disable_plane(dc, old_pipe_ctx);
2571 		}
2572 
2573 		if ((!pipe_ctx->plane_state ||
2574 		     pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2575 		    old_pipe_ctx->plane_state &&
2576 		    old_pipe_ctx->stream_res.tg == tg) {
2577 
2578 			hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
2579 			removed_pipe[i] = true;
2580 
2581 			DC_LOG_DC("Reset mpcc for pipe %d\n",
2582 					old_pipe_ctx->pipe_idx);
2583 		}
2584 	}
2585 
2586 	if (num_planes > 0)
2587 		dcn10_program_all_pipe_in_tree(dc, top_pipe_to_program, context);
2588 
2589 	/* Program secondary blending tree and writeback pipes */
2590 	if ((stream->num_wb_info > 0) && (hws->funcs.program_all_writeback_pipes_in_tree))
2591 		hws->funcs.program_all_writeback_pipes_in_tree(dc, stream, context);
2592 	if (interdependent_update)
2593 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
2594 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
2595 			/* Skip inactive pipes and ones already updated */
2596 			if (!pipe_ctx->stream || pipe_ctx->stream == stream ||
2597 			    !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg))
2598 				continue;
2599 
2600 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
2601 				pipe_ctx->plane_res.hubp,
2602 				&pipe_ctx->dlg_regs,
2603 				&pipe_ctx->ttu_regs);
2604 		}
2605 
2606 	if (interdependent_update)
2607 		dcn10_lock_all_pipes(dc, context, false);
2608 	else
2609 		dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
2610 
2611 	if (num_planes == 0)
2612 		false_optc_underflow_wa(dc, stream, tg);
2613 
2614 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2615 		if (removed_pipe[i])
2616 			dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
2617 
2618 	for (i = 0; i < dc->res_pool->pipe_count; i++)
2619 		if (removed_pipe[i]) {
2620 			dc->hwss.optimize_bandwidth(dc, context);
2621 			break;
2622 		}
2623 
2624 	if (dc->hwseq->wa.DEGVIDCN10_254)
2625 		hubbub1_wm_change_req_wa(dc->res_pool->hubbub);
2626 }
2627 
2628 static void dcn10_stereo_hw_frame_pack_wa(struct dc *dc, struct dc_state *context)
2629 {
2630 	uint8_t i;
2631 
2632 	for (i = 0; i < context->stream_count; i++) {
2633 		if (context->streams[i]->timing.timing_3d_format
2634 				== TIMING_3D_FORMAT_HW_FRAME_PACKING) {
2635 			/*
2636 			 * Disable stutter
2637 			 */
2638 			hubbub1_allow_self_refresh_control(dc->res_pool->hubbub, false);
2639 			break;
2640 		}
2641 	}
2642 }
2643 
2644 void dcn10_prepare_bandwidth(
2645 		struct dc *dc,
2646 		struct dc_state *context)
2647 {
2648 	struct dce_hwseq *hws = dc->hwseq;
2649 	struct hubbub *hubbub = dc->res_pool->hubbub;
2650 
2651 	if (dc->debug.sanity_checks)
2652 		hws->funcs.verify_allow_pstate_change_high(dc);
2653 
2654 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2655 		if (context->stream_count == 0)
2656 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2657 
2658 		dc->clk_mgr->funcs->update_clocks(
2659 				dc->clk_mgr,
2660 				context,
2661 				false);
2662 	}
2663 
2664 	hubbub->funcs->program_watermarks(hubbub,
2665 			&context->bw_ctx.bw.dcn.watermarks,
2666 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2667 			true);
2668 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2669 
2670 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2671 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2672 
2673 	if (dc->debug.sanity_checks)
2674 		hws->funcs.verify_allow_pstate_change_high(dc);
2675 }
2676 
2677 void dcn10_optimize_bandwidth(
2678 		struct dc *dc,
2679 		struct dc_state *context)
2680 {
2681 	struct dce_hwseq *hws = dc->hwseq;
2682 	struct hubbub *hubbub = dc->res_pool->hubbub;
2683 
2684 	if (dc->debug.sanity_checks)
2685 		hws->funcs.verify_allow_pstate_change_high(dc);
2686 
2687 	if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) {
2688 		if (context->stream_count == 0)
2689 			context->bw_ctx.bw.dcn.clk.phyclk_khz = 0;
2690 
2691 		dc->clk_mgr->funcs->update_clocks(
2692 				dc->clk_mgr,
2693 				context,
2694 				true);
2695 	}
2696 
2697 	hubbub->funcs->program_watermarks(hubbub,
2698 			&context->bw_ctx.bw.dcn.watermarks,
2699 			dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
2700 			true);
2701 	dcn10_stereo_hw_frame_pack_wa(dc, context);
2702 
2703 	if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
2704 		dcn_bw_notify_pplib_of_wm_ranges(dc);
2705 
2706 	if (dc->debug.sanity_checks)
2707 		hws->funcs.verify_allow_pstate_change_high(dc);
2708 }
2709 
2710 void dcn10_set_drr(struct pipe_ctx **pipe_ctx,
2711 		int num_pipes, unsigned int vmin, unsigned int vmax,
2712 		unsigned int vmid, unsigned int vmid_frame_number)
2713 {
2714 	int i = 0;
2715 	struct drr_params params = {0};
2716 	// DRR set trigger event mapped to OTG_TRIG_A (bit 11) for manual control flow
2717 	unsigned int event_triggers = 0x800;
2718 	// Note: DRR trigger events are generated regardless of whether the number of frames is met.
2719 	unsigned int num_frames = 2;
2720 
2721 	params.vertical_total_max = vmax;
2722 	params.vertical_total_min = vmin;
2723 	params.vertical_total_mid = vmid;
2724 	params.vertical_total_mid_frame_num = vmid_frame_number;
2725 
2726 	/* TODO: If multiple pipes are to be supported, you need
2727 	 * some GSL stuff. Static screen triggers may be programmed differently
2728 	 * as well.
2729 	 */
2730 	for (i = 0; i < num_pipes; i++) {
2731 		pipe_ctx[i]->stream_res.tg->funcs->set_drr(
2732 			pipe_ctx[i]->stream_res.tg, &params);
2733 		if (vmax != 0 && vmin != 0)
2734 			pipe_ctx[i]->stream_res.tg->funcs->set_static_screen_control(
2735 					pipe_ctx[i]->stream_res.tg,
2736 					event_triggers, num_frames);
2737 	}
2738 }
2739 
2740 void dcn10_get_position(struct pipe_ctx **pipe_ctx,
2741 		int num_pipes,
2742 		struct crtc_position *position)
2743 {
2744 	int i = 0;
2745 
2746 	/* TODO: handle pipes > 1
2747 	 */
2748 	for (i = 0; i < num_pipes; i++)
2749 		pipe_ctx[i]->stream_res.tg->funcs->get_position(pipe_ctx[i]->stream_res.tg, position);
2750 }
2751 
2752 void dcn10_set_static_screen_control(struct pipe_ctx **pipe_ctx,
2753 		int num_pipes, const struct dc_static_screen_params *params)
2754 {
2755 	unsigned int i;
2756 	unsigned int triggers = 0;
2757 
2758 	if (params->triggers.surface_update)
2759 		triggers |= 0x80;
2760 	if (params->triggers.cursor_update)
2761 		triggers |= 0x2;
2762 	if (params->triggers.force_trigger)
2763 		triggers |= 0x1;
2764 
2765 	for (i = 0; i < num_pipes; i++)
2766 		pipe_ctx[i]->stream_res.tg->funcs->
2767 			set_static_screen_control(pipe_ctx[i]->stream_res.tg,
2768 					triggers, params->num_frames);
2769 }
2770 
2771 static void dcn10_config_stereo_parameters(
2772 		struct dc_stream_state *stream, struct crtc_stereo_flags *flags)
2773 {
2774 	enum view_3d_format view_format = stream->view_format;
2775 	enum dc_timing_3d_format timing_3d_format =\
2776 			stream->timing.timing_3d_format;
2777 	bool non_stereo_timing = false;
2778 
2779 	if (timing_3d_format == TIMING_3D_FORMAT_NONE ||
2780 		timing_3d_format == TIMING_3D_FORMAT_SIDE_BY_SIDE ||
2781 		timing_3d_format == TIMING_3D_FORMAT_TOP_AND_BOTTOM)
2782 		non_stereo_timing = true;
2783 
2784 	if (non_stereo_timing == false &&
2785 		view_format == VIEW_3D_FORMAT_FRAME_SEQUENTIAL) {
2786 
2787 		flags->PROGRAM_STEREO         = 1;
2788 		flags->PROGRAM_POLARITY       = 1;
2789 		if (timing_3d_format == TIMING_3D_FORMAT_INBAND_FA ||
2790 			timing_3d_format == TIMING_3D_FORMAT_DP_HDMI_INBAND_FA ||
2791 			timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2792 			enum display_dongle_type dongle = \
2793 					stream->link->ddc->dongle_type;
2794 			if (dongle == DISPLAY_DONGLE_DP_VGA_CONVERTER ||
2795 				dongle == DISPLAY_DONGLE_DP_DVI_CONVERTER ||
2796 				dongle == DISPLAY_DONGLE_DP_HDMI_CONVERTER)
2797 				flags->DISABLE_STEREO_DP_SYNC = 1;
2798 		}
2799 		flags->RIGHT_EYE_POLARITY =\
2800 				stream->timing.flags.RIGHT_EYE_3D_POLARITY;
2801 		if (timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING)
2802 			flags->FRAME_PACKED = 1;
2803 	}
2804 
2805 	return;
2806 }
2807 
2808 void dcn10_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc)
2809 {
2810 	struct crtc_stereo_flags flags = { 0 };
2811 	struct dc_stream_state *stream = pipe_ctx->stream;
2812 
2813 	dcn10_config_stereo_parameters(stream, &flags);
2814 
2815 	if (stream->timing.timing_3d_format == TIMING_3D_FORMAT_SIDEBAND_FA) {
2816 		if (!dc_set_generic_gpio_for_stereo(true, dc->ctx->gpio_service))
2817 			dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2818 	} else {
2819 		dc_set_generic_gpio_for_stereo(false, dc->ctx->gpio_service);
2820 	}
2821 
2822 	pipe_ctx->stream_res.opp->funcs->opp_program_stereo(
2823 		pipe_ctx->stream_res.opp,
2824 		flags.PROGRAM_STEREO == 1 ? true:false,
2825 		&stream->timing);
2826 
2827 	pipe_ctx->stream_res.tg->funcs->program_stereo(
2828 		pipe_ctx->stream_res.tg,
2829 		&stream->timing,
2830 		&flags);
2831 
2832 	return;
2833 }
2834 
2835 static struct hubp *get_hubp_by_inst(struct resource_pool *res_pool, int mpcc_inst)
2836 {
2837 	int i;
2838 
2839 	for (i = 0; i < res_pool->pipe_count; i++) {
2840 		if (res_pool->hubps[i]->inst == mpcc_inst)
2841 			return res_pool->hubps[i];
2842 	}
2843 	ASSERT(false);
2844 	return NULL;
2845 }
2846 
2847 void dcn10_wait_for_mpcc_disconnect(
2848 		struct dc *dc,
2849 		struct resource_pool *res_pool,
2850 		struct pipe_ctx *pipe_ctx)
2851 {
2852 	struct dce_hwseq *hws = dc->hwseq;
2853 	int mpcc_inst;
2854 
2855 	if (dc->debug.sanity_checks) {
2856 		hws->funcs.verify_allow_pstate_change_high(dc);
2857 	}
2858 
2859 	if (!pipe_ctx->stream_res.opp)
2860 		return;
2861 
2862 	for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) {
2863 		if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
2864 			struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);
2865 
2866 			res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
2867 			pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
2868 			hubp->funcs->set_blank(hubp, true);
2869 		}
2870 	}
2871 
2872 	if (dc->debug.sanity_checks) {
2873 		hws->funcs.verify_allow_pstate_change_high(dc);
2874 	}
2875 
2876 }
2877 
2878 bool dcn10_dummy_display_power_gating(
2879 	struct dc *dc,
2880 	uint8_t controller_id,
2881 	struct dc_bios *dcb,
2882 	enum pipe_gating_control power_gating)
2883 {
2884 	return true;
2885 }
2886 
2887 void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
2888 {
2889 	struct dc_plane_state *plane_state = pipe_ctx->plane_state;
2890 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
2891 	bool flip_pending;
2892 
2893 	if (plane_state == NULL)
2894 		return;
2895 
2896 	flip_pending = pipe_ctx->plane_res.hubp->funcs->hubp_is_flip_pending(
2897 					pipe_ctx->plane_res.hubp);
2898 
2899 	plane_state->status.is_flip_pending = plane_state->status.is_flip_pending || flip_pending;
2900 
2901 	if (!flip_pending)
2902 		plane_state->status.current_address = plane_state->status.requested_address;
2903 
2904 	if (plane_state->status.current_address.type == PLN_ADDR_TYPE_GRPH_STEREO &&
2905 			tg->funcs->is_stereo_left_eye) {
2906 		plane_state->status.is_right_eye =
2907 				!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
2908 	}
2909 }
2910 
2911 void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
2912 {
2913 	struct hubbub *hubbub = hws->ctx->dc->res_pool->hubbub;
2914 
2915 	/* In DCN, this programming sequence is owned by the hubbub */
2916 	hubbub->funcs->update_dchub(hubbub, dh_data);
2917 }
2918 
2919 static bool dcn10_can_pipe_disable_cursor(struct pipe_ctx *pipe_ctx)
2920 {
2921 	struct pipe_ctx *test_pipe;
2922 	const struct rect *r1 = &pipe_ctx->plane_res.scl_data.recout, *r2;
2923 	int r1_r = r1->x + r1->width, r1_b = r1->y + r1->height, r2_r, r2_b;
2924 
2925 	/**
2926 	 * Disable the cursor if there's another pipe above this with a
2927 	 * plane that contains this pipe's viewport to prevent double cursor
2928 	 * and incorrect scaling artifacts.
2929 	 */
2930 	for (test_pipe = pipe_ctx->top_pipe; test_pipe;
2931 	     test_pipe = test_pipe->top_pipe) {
2932 		if (!test_pipe->plane_state->visible)
2933 			continue;
2934 
2935 		r2 = &test_pipe->plane_res.scl_data.recout;
2936 		r2_r = r2->x + r2->width;
2937 		r2_b = r2->y + r2->height;
2938 
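		/*
		 * The higher pipe's recout (r2) fully contains this pipe's
		 * recout (r1) when both of r1's corners fall inside r2.
		 */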
2939 		if (r1->x >= r2->x && r1->y >= r2->y && r1_r <= r2_r && r1_b <= r2_b)
2940 			return true;
2941 	}
2942 
2943 	return false;
2944 }
2945 
2946 void dcn10_set_cursor_position(struct pipe_ctx *pipe_ctx)
2947 {
2948 	struct dc_cursor_position pos_cpy = pipe_ctx->stream->cursor_position;
2949 	struct hubp *hubp = pipe_ctx->plane_res.hubp;
2950 	struct dpp *dpp = pipe_ctx->plane_res.dpp;
2951 	struct dc_cursor_mi_param param = {
2952 		.pixel_clk_khz = pipe_ctx->stream->timing.pix_clk_100hz / 10,
2953 		.ref_clk_khz = pipe_ctx->stream->ctx->dc->res_pool->ref_clocks.dchub_ref_clock_inKhz,
2954 		.viewport = pipe_ctx->plane_res.scl_data.viewport,
2955 		.h_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.horz,
2956 		.v_scale_ratio = pipe_ctx->plane_res.scl_data.ratios.vert,
2957 		.rotation = pipe_ctx->plane_state->rotation,
2958 		.mirror = pipe_ctx->plane_state->horizontal_mirror
2959 	};
2960 	bool pipe_split_on = (pipe_ctx->top_pipe != NULL) ||
2961 		(pipe_ctx->bottom_pipe != NULL);
2962 
2963 	int x_plane = pipe_ctx->plane_state->dst_rect.x;
2964 	int y_plane = pipe_ctx->plane_state->dst_rect.y;
2965 	int x_pos = pos_cpy.x;
2966 	int y_pos = pos_cpy.y;
2967 
2968 	// translate cursor from stream space to plane space
2969 	x_pos = (x_pos - x_plane) * pipe_ctx->plane_state->src_rect.width /
2970 			pipe_ctx->plane_state->dst_rect.width;
2971 	y_pos = (y_pos - y_plane) * pipe_ctx->plane_state->src_rect.height /
2972 			pipe_ctx->plane_state->dst_rect.height;
2973 
2974 	if (x_pos < 0) {
2975 		pos_cpy.x_hotspot -= x_pos;
2976 		x_pos = 0;
2977 	}
2978 
2979 	if (y_pos < 0) {
2980 		pos_cpy.y_hotspot -= y_pos;
2981 		y_pos = 0;
2982 	}
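	/*
	 * When the cursor origin is clamped to the viewport edge above, the
	 * hotspot is shifted by the clamped amount so that the visible hotspot
	 * stays where the caller requested it.
	 */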
2983 
2984 	pos_cpy.x = (uint32_t)x_pos;
2985 	pos_cpy.y = (uint32_t)y_pos;
2986 
2987 	if (pipe_ctx->plane_state->address.type
2988 			== PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
2989 		pos_cpy.enable = false;
2990 
2991 	if (pos_cpy.enable && dcn10_can_pipe_disable_cursor(pipe_ctx))
2992 		pos_cpy.enable = false;
2993 
2994 	// Swap axes and mirror horizontally
2995 	if (param.rotation == ROTATION_ANGLE_90) {
2996 		uint32_t temp_x = pos_cpy.x;
2997 
2998 		pos_cpy.x = pipe_ctx->plane_res.scl_data.viewport.width -
2999 				(pos_cpy.y - pipe_ctx->plane_res.scl_data.viewport.x) + pipe_ctx->plane_res.scl_data.viewport.x;
3000 		pos_cpy.y = temp_x;
3001 	}
3002 	// Swap axes and mirror vertically
3003 	else if (param.rotation == ROTATION_ANGLE_270) {
3004 		uint32_t temp_y = pos_cpy.y;
3005 		int viewport_height =
3006 			pipe_ctx->plane_res.scl_data.viewport.height;
3007 
3008 		if (pipe_split_on) {
3009 			if (pos_cpy.x > viewport_height) {
3010 				pos_cpy.x = pos_cpy.x - viewport_height;
3011 				pos_cpy.y = viewport_height - pos_cpy.x;
3012 			} else {
3013 				pos_cpy.y = 2 * viewport_height - pos_cpy.x;
3014 			}
3015 		} else
3016 			pos_cpy.y = viewport_height - pos_cpy.x;
3017 		pos_cpy.x = temp_y;
3018 	}
3019 	// Mirror horizontally and vertically
3020 	else if (param.rotation == ROTATION_ANGLE_180) {
3021 		int viewport_width =
3022 			pipe_ctx->plane_res.scl_data.viewport.width;
3023 		int viewport_x =
3024 			pipe_ctx->plane_res.scl_data.viewport.x;
3025 
3026 		if (pipe_split_on) {
3027 			if (pos_cpy.x >= viewport_width + viewport_x) {
3028 				pos_cpy.x = 2 * viewport_width
3029 						- pos_cpy.x + 2 * viewport_x;
3030 			} else {
3031 				uint32_t temp_x = pos_cpy.x;
3032 
3033 				pos_cpy.x = 2 * viewport_x - pos_cpy.x;
3034 				if (temp_x >= viewport_x +
3035 					(int)hubp->curs_attr.width || pos_cpy.x
3036 					<= (int)hubp->curs_attr.width +
3037 					pipe_ctx->plane_state->src_rect.x) {
3038 					pos_cpy.x = temp_x + viewport_width;
3039 				}
3040 			}
3041 		} else {
3042 			pos_cpy.x = viewport_width - pos_cpy.x + 2 * viewport_x;
3043 		}
3044 		pos_cpy.y = pipe_ctx->plane_res.scl_data.viewport.height - pos_cpy.y;
3045 	}
3046 
3047 	hubp->funcs->set_cursor_position(hubp, &pos_cpy, &param);
3048 	dpp->funcs->set_cursor_position(dpp, &pos_cpy, &param, hubp->curs_attr.width, hubp->curs_attr.height);
3049 }
3050 
3051 void dcn10_set_cursor_attribute(struct pipe_ctx *pipe_ctx)
3052 {
3053 	struct dc_cursor_attributes *attributes = &pipe_ctx->stream->cursor_attributes;
3054 
3055 	pipe_ctx->plane_res.hubp->funcs->set_cursor_attributes(
3056 			pipe_ctx->plane_res.hubp, attributes);
3057 	pipe_ctx->plane_res.dpp->funcs->set_cursor_attributes(
3058 		pipe_ctx->plane_res.dpp, attributes);
3059 }
3060 
3061 void dcn10_set_cursor_sdr_white_level(struct pipe_ctx *pipe_ctx)
3062 {
3063 	uint32_t sdr_white_level = pipe_ctx->stream->cursor_attributes.sdr_white_level;
3064 	struct fixed31_32 multiplier;
3065 	struct dpp_cursor_attributes opt_attr = { 0 };
3066 	uint32_t hw_scale = 0x3c00; // 1.0 default multiplier
3067 	struct custom_float_format fmt;
3068 
3069 	if (!pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes)
3070 		return;
3071 
3072 	fmt.exponenta_bits = 5;
3073 	fmt.mantissa_bits = 10;
3074 	fmt.sign = true;
3075 
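	/*
	 * hw_scale defaults to 0x3c00, i.e. 1.0 in the s5e10 custom float
	 * format configured above (same layout as IEEE half precision).  When
	 * the SDR white level exceeds the 80-nit reference, the cursor is
	 * scaled by sdr_white_level / 80; e.g. 160 nits gives a multiplier of
	 * 2.0, which encodes as 0x4000.
	 */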
3076 	if (sdr_white_level > 80) {
3077 		multiplier = dc_fixpt_from_fraction(sdr_white_level, 80);
3078 		convert_to_custom_float_format(multiplier, &fmt, &hw_scale);
3079 	}
3080 
3081 	opt_attr.scale = hw_scale;
3082 	opt_attr.bias = 0;
3083 
3084 	pipe_ctx->plane_res.dpp->funcs->set_optional_cursor_attributes(
3085 			pipe_ctx->plane_res.dpp, &opt_attr);
3086 }
3087 
3088 /*
3089  * apply_front_porch_workaround  TODO: is this still needed on FPGA?
3090  *
3091  * This is a workaround for a bug present since R5xx that has never been fixed:
3092  * keep the front porch at a minimum of 2 for interlaced modes, 1 for progressive.
3093  */
3094 static void apply_front_porch_workaround(
3095 	struct dc_crtc_timing *timing)
3096 {
3097 	if (timing->flags.INTERLACE == 1) {
3098 		if (timing->v_front_porch < 2)
3099 			timing->v_front_porch = 2;
3100 	} else {
3101 		if (timing->v_front_porch < 1)
3102 			timing->v_front_porch = 1;
3103 	}
3104 }
3105 
3106 int dcn10_get_vupdate_offset_from_vsync(struct pipe_ctx *pipe_ctx)
3107 {
3108 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3109 	struct dc_crtc_timing patched_crtc_timing;
3110 	int vesa_sync_start;
3111 	int asic_blank_end;
3112 	int interlace_factor;
3113 	int vertical_line_start;
3114 
3115 	patched_crtc_timing = *dc_crtc_timing;
3116 	apply_front_porch_workaround(&patched_crtc_timing);
3117 
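	/*
	 * vesa_sync_start counts lines from the start of active video to the
	 * start of VSYNC, so asic_blank_end works out to the number of lines
	 * from VSYNC to the end of the vertical blank (sync width plus back
	 * porch), doubled for interlaced timings.  VSTARTUP is programmed
	 * relative to the end of the blank, so the VUPDATE offset returned
	 * here can be negative when VSTARTUP reaches back past VSYNC.
	 */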
3118 	interlace_factor = patched_crtc_timing.flags.INTERLACE ? 2 : 1;
3119 
3120 	vesa_sync_start = patched_crtc_timing.v_addressable +
3121 			patched_crtc_timing.v_border_bottom +
3122 			patched_crtc_timing.v_front_porch;
3123 
3124 	asic_blank_end = (patched_crtc_timing.v_total -
3125 			vesa_sync_start -
3126 			patched_crtc_timing.v_border_top)
3127 			* interlace_factor;
3128 
3129 	vertical_line_start = asic_blank_end -
3130 			pipe_ctx->pipe_dlg_param.vstartup_start + 1;
3131 
3132 	return vertical_line_start;
3133 }
3134 
3135 static void dcn10_calc_vupdate_position(
3136 		struct dc *dc,
3137 		struct pipe_ctx *pipe_ctx,
3138 		uint32_t *start_line,
3139 		uint32_t *end_line)
3140 {
3141 	const struct dc_crtc_timing *dc_crtc_timing = &pipe_ctx->stream->timing;
3142 	int vline_int_offset_from_vupdate =
3143 			pipe_ctx->stream->periodic_interrupt0.lines_offset;
3144 	int vupdate_offset_from_vsync = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3145 	int start_position;
3146 
3147 	if (vline_int_offset_from_vupdate > 0)
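	/*
	 * lines_offset is relative to VUPDATE; the +/- 1 below nudges the
	 * requested offset one line toward zero (VUPDATE) before converting it
	 * into a VSYNC-relative line.  A negative start position wraps to the
	 * end of the frame, and the two-line interrupt window wraps back to
	 * line 2 if it would run past v_total.
	 */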
3148 		vline_int_offset_from_vupdate--;
3149 	else if (vline_int_offset_from_vupdate < 0)
3150 		vline_int_offset_from_vupdate++;
3151 
3152 	start_position = vline_int_offset_from_vupdate + vupdate_offset_from_vsync;
3153 
3154 	if (start_position >= 0)
3155 		*start_line = start_position;
3156 	else
3157 		*start_line = dc_crtc_timing->v_total + start_position - 1;
3158 
3159 	*end_line = *start_line + 2;
3160 
3161 	if (*end_line >= dc_crtc_timing->v_total)
3162 		*end_line = 2;
3163 }
3164 
3165 static void dcn10_cal_vline_position(
3166 		struct dc *dc,
3167 		struct pipe_ctx *pipe_ctx,
3168 		enum vline_select vline,
3169 		uint32_t *start_line,
3170 		uint32_t *end_line)
3171 {
3172 	enum vertical_interrupt_ref_point ref_point = INVALID_POINT;
3173 
3174 	if (vline == VLINE0)
3175 		ref_point = pipe_ctx->stream->periodic_interrupt0.ref_point;
3176 	else if (vline == VLINE1)
3177 		ref_point = pipe_ctx->stream->periodic_interrupt1.ref_point;
3178 
3179 	switch (ref_point) {
3180 	case START_V_UPDATE:
3181 		dcn10_calc_vupdate_position(
3182 				dc,
3183 				pipe_ctx,
3184 				start_line,
3185 				end_line);
3186 		break;
3187 	case START_V_SYNC:
3188 		// Nothing to do: the reference point is VSYNC, which is line 0.
3189 		break;
3190 	default:
3191 		ASSERT(0);
3192 		break;
3193 	}
3194 }
3195 
3196 void dcn10_setup_periodic_interrupt(
3197 		struct dc *dc,
3198 		struct pipe_ctx *pipe_ctx,
3199 		enum vline_select vline)
3200 {
3201 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3202 
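	/*
	 * VLINE0 is programmed as a two-line [start, end] window derived from
	 * the configured reference point (relative to VUPDATE, or left at line
	 * 0 for VSYNC), while VLINE1 takes its line offset directly from
	 * periodic_interrupt1.
	 */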
3203 	if (vline == VLINE0) {
3204 		uint32_t start_line = 0;
3205 		uint32_t end_line = 0;
3206 
3207 		dcn10_cal_vline_position(dc, pipe_ctx, vline, &start_line, &end_line);
3208 
3209 		tg->funcs->setup_vertical_interrupt0(tg, start_line, end_line);
3210 
3211 	} else if (vline == VLINE1) {
3212 		pipe_ctx->stream_res.tg->funcs->setup_vertical_interrupt1(
3213 				tg,
3214 				pipe_ctx->stream->periodic_interrupt1.lines_offset);
3215 	}
3216 }
3217 
3218 void dcn10_setup_vupdate_interrupt(struct dc *dc, struct pipe_ctx *pipe_ctx)
3219 {
3220 	struct timing_generator *tg = pipe_ctx->stream_res.tg;
3221 	int start_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
3222 
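	/*
	 * A negative offset means VUPDATE would land before VSYNC (VSTARTUP
	 * reaches back past the blank); assert and clamp to line 0 rather than
	 * programming a bogus line number.
	 */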
3223 	if (start_line < 0) {
3224 		ASSERT(0);
3225 		start_line = 0;
3226 	}
3227 
3228 	if (tg->funcs->setup_vertical_interrupt2)
3229 		tg->funcs->setup_vertical_interrupt2(tg, start_line);
3230 }
3231 
3232 void dcn10_unblank_stream(struct pipe_ctx *pipe_ctx,
3233 		struct dc_link_settings *link_settings)
3234 {
3235 	struct encoder_unblank_param params = { { 0 } };
3236 	struct dc_stream_state *stream = pipe_ctx->stream;
3237 	struct dc_link *link = stream->link;
3238 	struct dce_hwseq *hws = link->dc->hwseq;
3239 
3240 	/* only the parameters set below are used by unblank */
3241 	params.timing = pipe_ctx->stream->timing;
3242 
3243 	params.link_settings.link_rate = link_settings->link_rate;
3244 
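	/*
	 * For YCbCr 4:2:0 the pixel clock passed to the stream encoder is
	 * halved, presumably because two pixels are processed per clock cycle
	 * in that mode.
	 */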
3245 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3246 		if (params.timing.pixel_encoding == PIXEL_ENCODING_YCBCR420)
3247 			params.timing.pix_clk_100hz /= 2;
3248 		pipe_ctx->stream_res.stream_enc->funcs->dp_unblank(pipe_ctx->stream_res.stream_enc, &params);
3249 	}
3250 
3251 	if (link->local_sink && link->local_sink->sink_signal == SIGNAL_TYPE_EDP) {
3252 		hws->funcs.edp_backlight_control(link, true);
3253 	}
3254 }
3255 
3256 void dcn10_send_immediate_sdp_message(struct pipe_ctx *pipe_ctx,
3257 				const uint8_t *custom_sdp_message,
3258 				unsigned int sdp_message_size)
3259 {
3260 	if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
3261 		pipe_ctx->stream_res.stream_enc->funcs->send_immediate_sdp_message(
3262 				pipe_ctx->stream_res.stream_enc,
3263 				custom_sdp_message,
3264 				sdp_message_size);
3265 	}
3266 }
3267 enum dc_status dcn10_set_clock(struct dc *dc,
3268 			enum dc_clock_type clock_type,
3269 			uint32_t clk_khz,
3270 			uint32_t stepping)
3271 {
3272 	struct dc_state *context = dc->current_state;
3273 	struct dc_clock_config clock_cfg = {0};
3274 	struct dc_clocks *current_clocks = &context->bw_ctx.bw.dcn.clk;
3275 
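
	/*
	 * Ask the clock manager for the valid range of this clock, then
	 * validate the requested frequency against it before caching the new
	 * value and committing it with update_clocks().
	 */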
3276 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3277 		dc->clk_mgr->funcs->get_clock(dc->clk_mgr,
3278 				context, clock_type, &clock_cfg);
3279 
3280 	if (!dc->clk_mgr || !dc->clk_mgr->funcs->get_clock)
3281 		return DC_FAIL_UNSUPPORTED_1;
3282 
3283 	if (clk_khz > clock_cfg.max_clock_khz)
3284 		return DC_FAIL_CLK_EXCEED_MAX;
3285 
3286 	if (clk_khz < clock_cfg.min_clock_khz)
3287 		return DC_FAIL_CLK_BELOW_MIN;
3288 
3289 	if (clk_khz < clock_cfg.bw_requirequired_clock_khz)
3290 		return DC_FAIL_CLK_BELOW_CFG_REQUIRED;
3291 
3292 	/* update the internally requested clock for the update_clocks() call below */
3293 	if (clock_type == DC_CLOCK_TYPE_DISPCLK)
3294 		current_clocks->dispclk_khz = clk_khz;
3295 	else if (clock_type == DC_CLOCK_TYPE_DPPCLK)
3296 		current_clocks->dppclk_khz = clk_khz;
3297 	else
3298 		return DC_ERROR_UNEXPECTED;
3299 
3300 	if (dc->clk_mgr && dc->clk_mgr->funcs->update_clocks)
3301 		dc->clk_mgr->funcs->update_clocks(dc->clk_mgr,
3302 				context, true);
3303 
3304 	return DC_OK;
3305 }
3306 
3307 void dcn10_get_clock(struct dc *dc,
3308 			enum dc_clock_type clock_type,
3309 			struct dc_clock_config *clock_cfg)
3310 {
3311 	struct dc_state *context = dc->current_state;
3312 
3313 	if (dc->clk_mgr && dc->clk_mgr->funcs->get_clock)
3314 		dc->clk_mgr->funcs->get_clock(dc->clk_mgr, context, clock_type, clock_cfg);
3315 }
3317