xref: /dflybsd-src/sys/dev/drm/amd/amdgpu/amdgpu_pll.c (revision b843c749addef9340ee7d4e250b09fdd492602a1)
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for the numerator
 * @den_min: minimum value for the denominator
 *
 * Find the greatest common divisor and divide both numerator and
 * denominator by it, then scale the result back up if needed so that
 * both stay at least as large as their minimum values.
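 *
 * Illustrative walk-through (example numbers, not taken from the driver):
 * reducing 150/100 with nom_min = 4 and den_min = 1 first divides by
 * gcd(150, 100) = 50, giving 3/2; since 3 < nom_min, both terms are
 * multiplied by DIV_ROUND_UP(4, 3) = 2, yielding 6/4, which represents
 * the same ratio but satisfies both minimums.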
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
                                    unsigned nom_min, unsigned den_min)
{
        unsigned tmp;

        /* reduce the numbers to a simpler ratio */
        tmp = gcd(*nom, *den);
        *nom /= tmp;
        *den /= tmp;

        /* make sure the numerator is large enough */
        if (*nom < nom_min) {
                tmp = DIV_ROUND_UP(nom_min, *nom);
                *nom *= tmp;
                *den *= tmp;
        }

        /* make sure the denominator is large enough */
        if (*den < den_min) {
                tmp = DIV_ROUND_UP(den_min, *den);
                *nom *= tmp;
                *den *= tmp;
        }
}

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
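 *
 * Illustrative example (the numbers are hypothetical, not hardware limits):
 * with nom = 100, den = 27, post_div = 4, fb_div_max = 1023 and
 * ref_div_max = 1023, the ref * post ceiling reduces ref_div_max to
 * 128 / 4 = 32, ref_div becomes DIV_ROUND_CLOSEST(27, 4) = 7, and
 * fb_div becomes DIV_ROUND_CLOSEST(100 * 7 * 4, 27) = 104, so the
 * realized ratio is 104 / (7 * 4) ~= 3.71 against the requested
 * 100 / 27 ~= 3.70.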
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
                                      unsigned fb_div_max, unsigned ref_div_max,
                                      unsigned *fb_div, unsigned *ref_div)
{
        /* limit reference * post divider to a maximum */
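        /* the constant 128 is the ceiling this code assumes for the product
         * ref_div * post_div: after the integer division below, ref_div can
         * be at most 128 / post_div, on top of the caller-supplied
         * ref_div_max */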
        ref_div_max = min(128 / post_div, ref_div_max);

        /* get matching reference and feedback divider */
        *ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
        *fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

        /* limit fb divider to its maximum */
        if (*fb_div > fb_div_max) {
                *ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
                *fb_div = fb_div_max;
        }
}

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
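 *
 * Worked example of the relation above (illustrative values only): with
 * ref_freq = 2700, feedback_div = 60, ref_div = 2 and post_div = 3, the
 * formula gives dot_clock = (2700 * 60) / (2 * 3) = 27000; the VCO runs
 * at ref_freq * feedback_div / ref_div and the post divider brings it
 * down to the pixel clock.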
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
                        u32 freq,
                        u32 *dot_clock_p,
                        u32 *fb_div_p,
                        u32 *frac_fb_div_p,
                        u32 *ref_div_p,
                        u32 *post_div_p)
{
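        /* freq arrives one decimal digit finer than the resulting dot clock
         * (the debug print at the end compares freq against *dot_clock_p * 10);
         * keep that extra digit only when a fractional feedback divider may
         * be used, otherwise drop it right away */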
        unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
                freq : freq / 10;

        unsigned fb_div_min, fb_div_max, fb_div;
        unsigned post_div_min, post_div_max, post_div;
        unsigned ref_div_min, ref_div_max, ref_div;
        unsigned post_div_best, diff_best;
        unsigned nom, den;

        /* determine allowed feedback divider range */
        fb_div_min = pll->min_feedback_div;
        fb_div_max = pll->max_feedback_div;

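        /* with fractional feedback dividers the computation below tracks the
         * feedback divider in tenths, so the limits are scaled accordingly;
         * the integer and fractional parts are split again with / 10 and
         * % 10 when the result is stored */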
        if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
                fb_div_min *= 10;
                fb_div_max *= 10;
        }

        /* determine allowed ref divider range */
        if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
                ref_div_min = pll->reference_div;
        else
                ref_div_min = pll->min_ref_div;

        if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
            pll->flags & AMDGPU_PLL_USE_REF_DIV)
                ref_div_max = pll->reference_div;
        else
                ref_div_max = pll->max_ref_div;

        /* determine allowed post divider range */
        if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
                post_div_min = pll->post_div;
                post_div_max = pll->post_div;
        } else {
                unsigned vco_min, vco_max;

                if (pll->flags & AMDGPU_PLL_IS_LCD) {
                        vco_min = pll->lcd_pll_out_min;
                        vco_max = pll->lcd_pll_out_max;
                } else {
                        vco_min = pll->pll_out_min;
                        vco_max = pll->pll_out_max;
                }

                if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
                        vco_min *= 10;
                        vco_max *= 10;
                }

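                /* the post divider range is ceil(vco_min / target_clock) ..
                 * floor(vco_max / target_clock), clamped to the PLL's own
                 * post divider limits, so the VCO stays inside its window */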
                post_div_min = vco_min / target_clock;
                if ((target_clock * post_div_min) < vco_min)
                        ++post_div_min;
                if (post_div_min < pll->min_post_div)
                        post_div_min = pll->min_post_div;

                post_div_max = vco_max / target_clock;
                if ((target_clock * post_div_max) > vco_max)
                        --post_div_max;
                if (post_div_max > pll->max_post_div)
                        post_div_max = pll->max_post_div;
        }

        /* represent the searched ratio as a fractional number */
        nom = target_clock;
        den = pll->reference_freq;

        /* reduce the numbers to a simpler ratio */
        amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

        /* now search for a post divider */
        if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
                post_div_best = post_div_min;
        else
                post_div_best = post_div_max;
        diff_best = ~0;

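        /* scan all allowed post dividers: strictly better matches always win;
         * on ties, the ascending loop keeps the smallest post divider when
         * AMDGPU_PLL_PREFER_MINM_OVER_MAXP is set and otherwise moves on to
         * the largest one, matching the initial choice of post_div_best */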
        for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
                unsigned diff;
                amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
                                          ref_div_max, &fb_div, &ref_div);
                diff = abs(target_clock - (pll->reference_freq * fb_div) /
                        (ref_div * post_div));

                if (diff < diff_best || (diff == diff_best &&
                    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

                        post_div_best = post_div;
                        diff_best = diff;
                }
        }
        post_div = post_div_best;

        /* get the feedback and reference divider for the optimal value */
        amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
                                  &fb_div, &ref_div);

        /* reduce the numbers to a simpler ratio once more */
        /* this also makes sure that the reference divider is large enough */
        amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

        /* avoid high jitter with small fractional dividers */
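        /* the heuristic below raises the minimum feedback divider as the
         * fractional digit gets smaller: a remainder of 1 requires at least
         * 220 (22.0) while a remainder of 9 only requires 60 (6.0), so tiny
         * fractional steps are only used together with large dividers */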
        if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
                fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
                if (fb_div < fb_div_min) {
                        unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
                        fb_div *= tmp;
                        ref_div *= tmp;
                }
        }

        /* and finally save the result */
        if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
                *fb_div_p = fb_div / 10;
                *frac_fb_div_p = fb_div % 10;
        } else {
                *fb_div_p = fb_div;
                *frac_fb_div_p = 0;
        }

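        /* recombine the integer and fractional parts of the feedback divider
         * (fb * 10 + frac) and apply the formula from the header comment,
         * with everything scaled by 10 so the fractional digit contributes
         * to the result */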
        *dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
                        (pll->reference_freq * *frac_fb_div_p)) /
                       (ref_div * post_div * 10);
        *ref_div_p = ref_div;
        *post_div_p = post_div;

        DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
                      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
                      ref_div, post_div);
}

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *test_crtc;
        struct amdgpu_crtc *test_amdgpu_crtc;
        u32 pll_in_use = 0;

        list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
                if (crtc == test_crtc)
                        continue;

                test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
                if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
                        pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
        }
        return pll_in_use;
}
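
/*
 * Typical use of the mask returned above (a sketch, not a call site in
 * this file): a display code helper can test individual PPLL bits to
 * pick a free pixel PLL, e.g.
 *
 *	u32 pll_in_use = amdgpu_pll_get_use_mask(crtc);
 *
 *	if (!(pll_in_use & (1 << ATOM_PPLL1)))
 *		return ATOM_PPLL1;
 *
 * where ATOM_PPLL1 is one of the ATOM_PPLL* ids also stored in pll_id.
 */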

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *test_crtc;
        struct amdgpu_crtc *test_amdgpu_crtc;

        list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
                if (crtc == test_crtc)
                        continue;
                test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
                if (test_amdgpu_crtc->encoder &&
                    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
                        /* for DP use the same PLL for all */
                        if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
                                return test_amdgpu_crtc->pll_id;
                }
        }
        return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
        struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_crtc *test_crtc;
        struct amdgpu_crtc *test_amdgpu_crtc;
        u32 adjusted_clock, test_adjusted_clock;

        adjusted_clock = amdgpu_crtc->adjusted_clock;

        if (adjusted_clock == 0)
                return ATOM_PPLL_INVALID;

        list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
                if (crtc == test_crtc)
                        continue;
                test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
                if (test_amdgpu_crtc->encoder &&
                    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
                        /* check if we are already driving this connector with another crtc */
                        if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
                                /* if we are, return that pll */
                                if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
                                        return test_amdgpu_crtc->pll_id;
                        }
                        /* for non-DP check the clock */
                        test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
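                        /* sharing is only safe when both CRTCs request the
                         * same mode clock and adjusted clock and agree on
                         * spread spectrum, and the other PLL id is valid */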
                        if ((crtc->mode.clock == test_crtc->mode.clock) &&
                            (adjusted_clock == test_adjusted_clock) &&
                            (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
                            (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
                                return test_amdgpu_crtc->pll_id;
                }
        }
        return ATOM_PPLL_INVALID;
}