/*	$NetBSD: amdgpu_pll.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_pll.c,v 1.4 2021/12/18 23:44:58 riastradh Exp $");

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atom.h"
#include "atombios_encoders.h"
#include "amdgpu_pll.h"
#include <asm/div64.h>
#include <linux/gcd.h>

/**
 * amdgpu_pll_reduce_ratio - fractional number reduction
 *
 * @nom: numerator
 * @den: denominator
 * @nom_min: minimum value for numerator
 * @den_min: minimum value for denominator
 *
 * Find the greatest common divisor and apply it on both numerator and
 * denominator, but make sure numerator and denominator are at least as
 * large as their minimum values.
 */
static void amdgpu_pll_reduce_ratio(unsigned *nom, unsigned *den,
				    unsigned nom_min, unsigned den_min)
{
	unsigned tmp;

	/* reduce the numbers to a simpler ratio */
	tmp = gcd(*nom, *den);
	*nom /= tmp;
	*den /= tmp;

	/* make sure numerator is large enough */
	if (*nom < nom_min) {
		tmp = DIV_ROUND_UP(nom_min, *nom);
		*nom *= tmp;
		*den *= tmp;
	}

	/* make sure the denominator is large enough */
	if (*den < den_min) {
		tmp = DIV_ROUND_UP(den_min, *den);
		*nom *= tmp;
		*den *= tmp;
	}
}

/**
 * amdgpu_pll_get_fb_ref_div - feedback and ref divider calculation
 *
 * @nom: numerator
 * @den: denominator
 * @post_div: post divider
 * @fb_div_max: feedback divider maximum
 * @ref_div_max: reference divider maximum
 * @fb_div: resulting feedback divider
 * @ref_div: resulting reference divider
 *
 * Calculate feedback and reference divider for a given post divider. Makes
 * sure we stay within the limits.
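 *
 * A worked example may help (the numbers are purely illustrative and are
 * not taken from any real hardware limits): with nom = 4 and den = 1
 * (e.g. a 108 MHz target from a 27 MHz reference after
 * amdgpu_pll_reduce_ratio), post_div = 2 and generous divider maxima,
 * this yields ref_div = 1 and fb_div = DIV_ROUND_CLOSEST(4 * 1 * 2, 1) = 8,
 * i.e. 27 MHz * 8 / (1 * 2) = 108 MHz.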
 */
static void amdgpu_pll_get_fb_ref_div(unsigned nom, unsigned den, unsigned post_div,
				      unsigned fb_div_max, unsigned ref_div_max,
				      unsigned *fb_div, unsigned *ref_div)
{
	/* limit reference * post divider to a maximum */
	ref_div_max = min(128 / post_div, ref_div_max);

	/* get matching reference and feedback divider */
	*ref_div = min(max(DIV_ROUND_CLOSEST(den, post_div), 1u), ref_div_max);
	*fb_div = DIV_ROUND_CLOSEST(nom * *ref_div * post_div, den);

	/* limit fb divider to its maximum */
	if (*fb_div > fb_div_max) {
		*ref_div = DIV_ROUND_CLOSEST(*ref_div * fb_div_max, *fb_div);
		*fb_div = fb_div_max;
	}
}

/**
 * amdgpu_pll_compute - compute PLL parameters
 *
 * @pll: information about the PLL
 * @freq: requested frequency
 * @dot_clock_p: resulting pixel clock
 * @fb_div_p: resulting feedback divider
 * @frac_fb_div_p: fractional part of the feedback divider
 * @ref_div_p: resulting reference divider
 * @post_div_p: resulting post divider
 *
 * Try to calculate the PLL parameters to generate the given frequency:
 * dot_clock = (ref_freq * feedback_div) / (ref_div * post_div)
 */
void amdgpu_pll_compute(struct amdgpu_pll *pll,
			u32 freq,
			u32 *dot_clock_p,
			u32 *fb_div_p,
			u32 *frac_fb_div_p,
			u32 *ref_div_p,
			u32 *post_div_p)
{
	unsigned target_clock = pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV ?
		freq : freq / 10;

	unsigned fb_div_min, fb_div_max, fb_div;
	unsigned post_div_min, post_div_max, post_div;
	unsigned ref_div_min, ref_div_max, ref_div;
	unsigned post_div_best, diff_best;
	unsigned nom, den;

	/* determine allowed feedback divider range */
	fb_div_min = pll->min_feedback_div;
	fb_div_max = pll->max_feedback_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		fb_div_min *= 10;
		fb_div_max *= 10;
	}

	/* determine allowed ref divider range */
	if (pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_min = pll->reference_div;
	else
		ref_div_min = pll->min_ref_div;

	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV &&
	    pll->flags & AMDGPU_PLL_USE_REF_DIV)
		ref_div_max = pll->reference_div;
	else
		ref_div_max = pll->max_ref_div;

	/* determine allowed post divider range */
	if (pll->flags & AMDGPU_PLL_USE_POST_DIV) {
		post_div_min = pll->post_div;
		post_div_max = pll->post_div;
	} else {
		unsigned vco_min, vco_max;

		if (pll->flags & AMDGPU_PLL_IS_LCD) {
			vco_min = pll->lcd_pll_out_min;
			vco_max = pll->lcd_pll_out_max;
		} else {
			vco_min = pll->pll_out_min;
			vco_max = pll->pll_out_max;
		}

		if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
			vco_min *= 10;
			vco_max *= 10;
		}

		post_div_min = vco_min / target_clock;
		if ((target_clock * post_div_min) < vco_min)
			++post_div_min;
		if (post_div_min < pll->min_post_div)
			post_div_min = pll->min_post_div;

		post_div_max = vco_max / target_clock;
		if ((target_clock * post_div_max) > vco_max)
			--post_div_max;
		if (post_div_max > pll->max_post_div)
			post_div_max = pll->max_post_div;
	}

	/* represent the searched ratio as fractional number */
	nom = target_clock;
	den = pll->reference_freq;

	/* reduce the numbers to a simpler ratio */
	amdgpu_pll_reduce_ratio(&nom, &den, fb_div_min, post_div_min);

	/* now search for a post divider */
	if (pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP)
		post_div_best = post_div_min;
	else
		post_div_best = post_div_max;
	diff_best = ~0;
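
	/*
	 * Note on the search below: every allowed post divider is tried,
	 * matching feedback/reference dividers are derived for it, and the
	 * post divider whose resulting clock
	 * ref_freq * fb_div / (ref_div * post_div) comes closest to the
	 * target is kept.  On a tie the larger post divider wins unless
	 * AMDGPU_PLL_PREFER_MINM_OVER_MAXP asks for the smaller one.
	 */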
	for (post_div = post_div_min; post_div <= post_div_max; ++post_div) {
		unsigned diff;
		amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max,
					  ref_div_max, &fb_div, &ref_div);
		diff = abs(target_clock - (pll->reference_freq * fb_div) /
			(ref_div * post_div));

		if (diff < diff_best || (diff == diff_best &&
		    !(pll->flags & AMDGPU_PLL_PREFER_MINM_OVER_MAXP))) {

			post_div_best = post_div;
			diff_best = diff;
		}
	}
	post_div = post_div_best;

	/* get the feedback and reference divider for the optimal value */
	amdgpu_pll_get_fb_ref_div(nom, den, post_div, fb_div_max, ref_div_max,
				  &fb_div, &ref_div);

	/* reduce the numbers to a simpler ratio once more */
	/* this also makes sure that the reference divider is large enough */
	amdgpu_pll_reduce_ratio(&fb_div, &ref_div, fb_div_min, ref_div_min);

	/* avoid high jitter with small fractional dividers */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV && (fb_div % 10)) {
		fb_div_min = max(fb_div_min, (9 - (fb_div % 10)) * 20 + 60);
		if (fb_div < fb_div_min) {
			unsigned tmp = DIV_ROUND_UP(fb_div_min, fb_div);
			fb_div *= tmp;
			ref_div *= tmp;
		}
	}

	/* and finally save the result */
	if (pll->flags & AMDGPU_PLL_USE_FRAC_FB_DIV) {
		*fb_div_p = fb_div / 10;
		*frac_fb_div_p = fb_div % 10;
	} else {
		*fb_div_p = fb_div;
		*frac_fb_div_p = 0;
	}

	*dot_clock_p = ((pll->reference_freq * *fb_div_p * 10) +
			(pll->reference_freq * *frac_fb_div_p)) /
		       (ref_div * post_div * 10);
	*ref_div_p = ref_div;
	*post_div_p = post_div;

	DRM_DEBUG_KMS("%d - %d, pll dividers - fb: %d.%d ref: %d, post %d\n",
		      freq, *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p,
		      ref_div, post_div);
}

/**
 * amdgpu_pll_get_use_mask - look up a mask of which pplls are in use
 *
 * @crtc: drm crtc
 *
 * Returns the mask of which PPLLs (Pixel PLLs) are in use.
 */
u32 amdgpu_pll_get_use_mask(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 pll_in_use = 0;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;

		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
			pll_in_use |= (1 << test_amdgpu_crtc->pll_id);
	}
	return pll_in_use;
}

/**
 * amdgpu_pll_get_shared_dp_ppll - return the PPLL used by another crtc for DP
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another crtc/encoder which is
 * also in DP mode.  For DP, a single PPLL can be used for all DP
 * crtcs/encoders.
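 *
 * (Background, not stated in this file: the DP link clock depends only on
 * the negotiated link rate, not on each mode's pixel clock, which is why a
 * single PPLL output is good enough for every DP crtc/encoder.)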
 */
int amdgpu_pll_get_shared_dp_ppll(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* for DP use the same PLL for all */
			if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}

/**
 * amdgpu_pll_get_shared_nondp_ppll - return the PPLL used by another non-DP crtc
 *
 * @crtc: drm crtc
 *
 * Returns the PPLL (Pixel PLL) used by another non-DP crtc/encoder which can
 * be shared (i.e., same clock).
 */
int amdgpu_pll_get_shared_nondp_ppll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_crtc *test_crtc;
	struct amdgpu_crtc *test_amdgpu_crtc;
	u32 adjusted_clock, test_adjusted_clock;

	adjusted_clock = amdgpu_crtc->adjusted_clock;

	if (adjusted_clock == 0)
		return ATOM_PPLL_INVALID;

	list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
		if (crtc == test_crtc)
			continue;
		test_amdgpu_crtc = to_amdgpu_crtc(test_crtc);
		if (test_amdgpu_crtc->encoder &&
		    !ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(test_amdgpu_crtc->encoder))) {
			/* check if we are already driving this connector with another crtc */
			if (test_amdgpu_crtc->connector == amdgpu_crtc->connector) {
				/* if we are, return that pll */
				if (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID)
					return test_amdgpu_crtc->pll_id;
			}
			/* for non-DP check the clock */
			test_adjusted_clock = test_amdgpu_crtc->adjusted_clock;
			if ((crtc->mode.clock == test_crtc->mode.clock) &&
			    (adjusted_clock == test_adjusted_clock) &&
			    (amdgpu_crtc->ss_enabled == test_amdgpu_crtc->ss_enabled) &&
			    (test_amdgpu_crtc->pll_id != ATOM_PPLL_INVALID))
				return test_amdgpu_crtc->pll_id;
		}
	}
	return ATOM_PPLL_INVALID;
}
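
#if 0
/*
 * Illustrative sketch only (not compiled, not part of this driver): roughly
 * how a display code path might combine the helpers above when assigning a
 * PPLL to a CRTC.  The helper name example_pick_pll and the exact PPLL
 * preference order are hypothetical; see the dce_v*_0_pick_pll()
 * implementations for the real logic.
 */
static u32 example_pick_pll(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	u32 pll_in_use;
	int pll;

	if (amdgpu_crtc->encoder &&
	    ENCODER_MODE_IS_DP(amdgpu_atombios_encoder_get_encoder_mode(
			amdgpu_crtc->encoder))) {
		/* all DP outputs can share one PPLL */
		pll = amdgpu_pll_get_shared_dp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	} else {
		/* non-DP: share only with an identical clock setup */
		pll = amdgpu_pll_get_shared_nondp_ppll(crtc);
		if (pll != ATOM_PPLL_INVALID)
			return pll;
	}

	/* otherwise take a PPLL no other CRTC has claimed */
	pll_in_use = amdgpu_pll_get_use_mask(crtc);
	if (!(pll_in_use & (1 << ATOM_PPLL2)))
		return ATOM_PPLL2;
	if (!(pll_in_use & (1 << ATOM_PPLL1)))
		return ATOM_PPLL1;
	return ATOM_PPLL_INVALID;
}
#endif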