/*	$NetBSD: nouveau_nvkm_subdev_clk_gm20b.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_gm20b.c,v 1.2 2021/12/18 23:45:39 riastradh Exp $");

#include <subdev/clk.h>
#include <subdev/volt.h>
#include <subdev/timer.h>
#include <core/device.h>
#include <core/tegra.h>

#include "priv.h"
#include "gk20a.h"

#define GPCPLL_CFG_SYNC_MODE	BIT(2)

#define BYPASSCTRL_SYS	(SYS_GPCPLL_CFG_BASE + 0x340)
#define BYPASSCTRL_SYS_GPCPLL_SHIFT	0
#define BYPASSCTRL_SYS_GPCPLL_WIDTH	1

#define GPCPLL_CFG2_SDM_DIN_SHIFT	0
#define GPCPLL_CFG2_SDM_DIN_WIDTH	8
#define GPCPLL_CFG2_SDM_DIN_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT)
#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT	8
#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH	15
#define GPCPLL_CFG2_SDM_DIN_NEW_MASK	\
	(MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT)
#define GPCPLL_CFG2_SETUP2_SHIFT	16
#define GPCPLL_CFG2_PLL_STEPA_SHIFT	24

#define GPCPLL_DVFS0	(SYS_GPCPLL_CFG_BASE + 0x10)
#define GPCPLL_DVFS0_DFS_COEFF_SHIFT	0
#define GPCPLL_DVFS0_DFS_COEFF_WIDTH	7
#define GPCPLL_DVFS0_DFS_COEFF_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT)
#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT	8
#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH	7
#define GPCPLL_DVFS0_DFS_DET_MAX_MASK	\
	(MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT)

#define GPCPLL_DVFS1		(SYS_GPCPLL_CFG_BASE + 0x14)
#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT		0
#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH		7
#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT		7
#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH		1
#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT		8
#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH		7
#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT		15
#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH		1
#define GPCPLL_DVFS1_DFS_CTRL_SHIFT		16
#define GPCPLL_DVFS1_DFS_CTRL_WIDTH		12
#define GPCPLL_DVFS1_EN_SDM_SHIFT		28
#define GPCPLL_DVFS1_EN_SDM_WIDTH		1
#define GPCPLL_DVFS1_EN_SDM_BIT			BIT(28)
#define GPCPLL_DVFS1_EN_DFS_SHIFT		29
#define GPCPLL_DVFS1_EN_DFS_WIDTH		1
#define GPCPLL_DVFS1_EN_DFS_BIT			BIT(29)
#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT		30
#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH		1
#define GPCPLL_DVFS1_EN_DFS_CAL_BIT		BIT(30)
#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT		31
#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH		1
#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT		BIT(31)

#define GPC_BCAST_GPCPLL_DVFS2	(GPC_BCAST_GPCPLL_CFG_BASE + 0x20)
#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT	BIT(16)

#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT	24
#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH	7

#define DFS_DET_RANGE	6	/* -2^6 ... 2^6-1 */
#define SDM_DIN_RANGE	12	/* -2^12 ... 2^12-1 */

struct gm20b_clk_dvfs_params {
	s32 coeff_slope;
	s32 coeff_offs;
	u32 vco_ctrl;
};

static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = {
	.coeff_slope = -165230,
	.coeff_offs = 214007,
	.vco_ctrl = 0x7 << 3,
};
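
/*
 * Worked example (illustrative numbers, not from the original source): with
 * the parameters above, at 900 mV the coefficient computed by
 * gm20b_dvfs_calc_det_coeff() is
 *   DIV_ROUND_CLOSEST(900 * -165230, 1000) + 214007 = -148707 + 214007
 *                                                   = 65300,
 * then DIV_ROUND_CLOSEST(65300, 1000) = 65, which fits the 7-bit DFS_COEFF
 * field (maximum 127).
 */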

/*
 * base.n is now the *integer* part of the N factor.
 * sdm_din contains n's fractional part.
 */
struct gm20b_pll {
	struct gk20a_pll base;
	u32 sdm_din;
};

struct gm20b_clk_dvfs {
	u32 dfs_coeff;
	s32 dfs_det_max;
	s32 dfs_ext_cal;
};

struct gm20b_clk {
	/* currently applied parameters */
	struct gk20a_clk base;
	struct gm20b_clk_dvfs dvfs;
	u32 uv;

	/* new parameters to apply */
	struct gk20a_pll new_pll;
	struct gm20b_clk_dvfs new_dvfs;
	u32 new_uv;

	const struct gm20b_clk_dvfs_params *dvfs_params;

	/* fused parameters */
	s32 uvdet_slope;
	s32 uvdet_offs;

	/* safe frequency we can use at minimum voltage */
	u32 safe_fmax_vmin;
};
#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base)

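/*
 * On GM20B the PL register field encodes the linear post divider directly,
 * so these conversion helpers are identity functions (GK20A, by contrast,
 * uses a lookup table).
 */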
static u32 pl_to_div(u32 pl)
{
	return pl;
}

static u32 div_to_pl(u32 div)
{
	return div;
}

static const struct gk20a_clk_pllg_params gm20b_pllg_params = {
	.min_vco = 1300000, .max_vco = 2600000,
	.min_u = 12000, .max_u = 38400,
	.min_m = 1, .max_m = 255,
	.min_n = 8, .max_n = 255,
	.min_pl = 1, .max_pl = 31,
};
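
/*
 * The frequencies above are in kHz: the VCO may run at 1.3-2.6 GHz from a
 * 12-38.4 MHz reference.
 */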

static void
gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 val;

	gk20a_pllg_read_mnp(&clk->base, &pll->base);
	val = nvkm_rd32(device, GPCPLL_CFG2);
	pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) &
		       MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);
}

static void
gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);
	gk20a_pllg_write_mnp(&clk->base, &pll->base);
}

/*
 * Determine DFS_COEFF for the requested voltage. Always select external
 * calibration override equal to the voltage, and set maximum detection
 * limit "0" (to make sure that PLL output remains under F/V curve when
 * voltage increases).
 */
static void
gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv,
			  struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gm20b_clk_dvfs_params *p = clk->dvfs_params;
	u32 coeff;
	/* Work with mv as uv would likely trigger an overflow */
	s32 mv = DIV_ROUND_CLOSEST(uv, 1000);

	/* coeff = slope * voltage + offset */
	coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs;
	coeff = DIV_ROUND_CLOSEST(coeff, 1000);
	dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH));

	dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs,
					     clk->uvdet_slope);
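	/*
	 * E.g. (illustrative values): with uvdet_offs = 800000 uV and the
	 * default 10000 uV/step slope, uv = 900000 gives dfs_ext_cal =
	 * (900000 - 800000) / 10000 = 10, well inside the +/-2^6 detection
	 * range checked below.
	 */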
	/* should never happen */
	if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE))
		nvkm_error(subdev, "dfs_ext_cal overflow!\n");

	dvfs->dfs_det_max = 0;

	nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n",
		   __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal,
		   dvfs->dfs_det_max);
}

/*
 * Solve equation for integer and fractional part of the effective NDIV:
 *
 * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) +
 *         (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE
 *
 * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw.
 */
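/*
 * Illustrative check of the encoding (assumed numbers, not from the original
 * source): with det_delta == 0 and n_eff == 100, n = 100 << 6 = 6400, so
 * n_int = 100 and the 6-bit remainder is 0.  The remainder then becomes
 * (0 << 7) - 4096 = -4096; its most significant byte, 0xf0, is the signed
 * top byte of the 16-bit SDM value 0xf000 = -4096, contributing
 * -4096 / 2^13 = -1/2 and cancelling the +1/2 term, so n_eff == n_int
 * exactly.
 */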
static void
gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	const struct gk20a_clk_pllg_params *p = clk->base.params;
	u32 n;
	s32 det_delta;
	u32 rem, rem_range;

	/* calculate current ext_cal and subtract previous one */
	det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs,
				      clk->uvdet_slope);
	det_delta -= clk->dvfs.dfs_ext_cal;
	det_delta = min(det_delta, clk->dvfs.dfs_det_max);
	det_delta *= clk->dvfs.dfs_coeff;

	/* integer part of n */
	n = (n_eff << DFS_DET_RANGE) - det_delta;
	/* should never happen! */
	if ((s32)n <= 0) {
		nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n");
		n = 1 << DFS_DET_RANGE;
	}
	if (n >> DFS_DET_RANGE > p->max_n) {
		nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n");
		n = p->max_n << DFS_DET_RANGE;
	}
	*n_int = n >> DFS_DET_RANGE;

	/* fractional part of n */
	rem = ((u32)n) & MASK(DFS_DET_RANGE);
	rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE;
	/* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */
	rem = (rem << rem_range) - BIT(SDM_DIN_RANGE);
	/* lose 8 LSB and clip - sdm_din only keeps the most significant byte */
	*sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH);

	nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__,
		   n_eff, *n_int, *sdm_din);
}

static int
gm20b_pllg_slide(struct gm20b_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll pll;
	u32 n_int, sdm_din;
	int ret = 0;

	/* calculate the new n_int/sdm_din for this n/uv */
	gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din);

	/* get old coefficients */
	gm20b_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n_int == pll.base.n && sdm_din == pll.sdm_din)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	/* in DVFS mode SDM is updated via "new" field */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT);
	pll.base.n = n_int;
	udelay(1);
	gk20a_pllg_write_mnp(&clk->base, &pll.base);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* in DVFS mode complete SDM update */
	nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK,
		  sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT);

	/* exit slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);

	return ret;
}

static int
gm20b_pllg_enable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* In DVFS mode lock cannot be used - so just delay */
	udelay(40);

	/* set SYNC_MODE for glitchless switch out of bypass */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE,
		       GPCPLL_CFG_SYNC_MODE);
	nvkm_rd32(device, GPCPLL_CFG);

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}

static void
gm20b_pllg_disable(struct gm20b_clk *clk)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	/* clear SYNC_MODE before disabling PLL */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);
}

static int
gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gm20b_pll cur_pll;
	u32 n_int, sdm_din;
	/* if we only change pdiv, we can do a glitchless transition */
	bool pdiv_only;
	int ret;

	gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din);
	gm20b_pllg_read_mnp(clk, &cur_pll);
	pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din &&
		    cur_pll.base.m == pll->m;

	/* need full sequence if clock not enabled yet */
	if (!gk20a_pllg_is_enabled(&clk->base))
		pdiv_only = false;

	/* split VCO-to-bypass jump in half by setting out divider 1:2 */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);
	udelay(2);

	if (pdiv_only) {
		u32 old = cur_pll.base.pl;
		u32 new = pll->pl;

		/*
		 * we can do a glitchless transition only if the old and new PL
		 * parameters share at least one bit set to 1. If this is not
		 * the case, calculate and program an interim PL that will allow
		 * us to respect that rule.
		 */
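		/*
		 * For instance (illustrative): old = 1 and new = 2 share no
		 * set bit, so the interim value is min(1 | 2, 2 | 1) = 3,
		 * which overlaps both endpoints and keeps the divider
		 * transition glitchless.
		 */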
		if ((old & new) == 0) {
			cur_pll.base.pl = min(old | BIT(ffs(new) - 1),
					      new | BIT(ffs(old) - 1));
			gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
		}

		cur_pll.base.pl = new;
		gk20a_pllg_write_mnp(&clk->base, &cur_pll.base);
	} else {
		/* disable before programming if more than pdiv changes */
		gm20b_pllg_disable(clk);

		cur_pll.base = *pll;
		cur_pll.base.n = n_int;
		cur_pll.sdm_din = sdm_din;
		gm20b_pllg_write_mnp(clk, &cur_pll);

		ret = gm20b_pllg_enable(clk);
		if (ret)
			return ret;
	}

	/* restore out divider 1:1 */
	udelay(2);
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	/* Intentional 2nd write to assure linear divider operation */
	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK,
		  GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT);
	nvkm_rd32(device, GPC2CLK_OUT);

	return 0;
}

static int
gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll)
{
	struct gk20a_pll cur_pll;
	int ret;

	if (gk20a_pllg_is_enabled(&clk->base)) {
		gk20a_pllg_read_mnp(&clk->base, &cur_pll);

		/* just do NDIV slide if there is no change to M and PL */
		if (pll->m == cur_pll.m && pll->pl == cur_pll.pl)
			return gm20b_pllg_slide(clk, pll->n);

		/* slide down to current NDIV_LO */
		cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
		ret = gm20b_pllg_slide(clk, cur_pll.n);
		if (ret)
			return ret;
	}

	/* program MNP with the new clock parameters and new NDIV_LO */
	cur_pll = *pll;
	cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll);
	ret = gm20b_pllg_program_mnp(clk, &cur_pll);
	if (ret)
		return ret;

	/* slide up to new NDIV */
	return gm20b_pllg_slide(clk, pll->n);
}

static int
gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	struct nvkm_subdev *subdev = &base->subdev;
	struct nvkm_volt *volt = base->subdev.device->volt;
	int ret;

	ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] *
					     GK20A_CLK_GPC_MDIV, &clk->new_pll);
	if (ret)
		return ret;

	clk->new_uv = volt->vid[cstate->voltage].uv;
	gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs);

	nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv);

	return 0;
}

/*
 * Compute PLL parameters that are always safe for the current voltage
 */
static void
gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll)
{
	u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ;
	u32 parent_rate = clk->base.parent_rate / KHZ;
	u32 nmin, nsafe;

	/* remove a safe margin of 10% */
	if (rate > clk->safe_fmax_vmin)
		rate = rate * (100 - 10) / 100;

	/* gpc2clk */
	rate *= 2;

	nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate);
	nsafe = pll->m * rate / parent_rate;
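	/*
	 * Illustrative numbers (assumed, not from the original source): with
	 * a 38.4 MHz parent and m == 1, nmin = DIV_ROUND_UP(1300000, 38400)
	 * = 34, i.e. the VCO floor. If the safe gpc2clk target only needs
	 * nsafe = 24, nsafe is raised to nmin and the post divider pl is
	 * increased instead to hit the target rate.
	 */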

	if (nsafe < nmin) {
		pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate);
		nsafe = nmin;
	}

	pll->n = nsafe;
}

static void
gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK,
		  coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);
}

static void
gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal)
{
	struct nvkm_device *device = clk->base.base.subdev.device;
	u32 val;

	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1),
		  dfs_det_cal);
	udelay(1);

	val = nvkm_rd32(device, GPCPLL_DVFS1);
	if (!(val & BIT(25))) {
		/* Use external value to overwrite calibration value */
		val |= BIT(25) | BIT(16);
		nvkm_wr32(device, GPCPLL_DVFS1, val);
	}
}

static void
gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk,
				 struct gm20b_clk_dvfs *dvfs)
{
	struct nvkm_device *device = clk->base.base.subdev.device;

	/* strobe to read external DFS coefficient */
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT);

	nvkm_mask(device, GPCPLL_DVFS0,
		  GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK,
		  dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT |
		  dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT);

	udelay(1);
	nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2,
		  GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0);

	gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal);
}

static int
gm20b_clk_prog(struct nvkm_clk *base)
{
	struct gm20b_clk *clk = gm20b_clk(base);
	u32 cur_freq;
	int ret;

	/* No change in DVFS settings? */
	if (clk->uv == clk->new_uv)
		goto prog;

	/*
	 * Interim step for changing DVFS detection settings: low enough
	 * frequency to be safe at DVFS coeff = 0.
	 *
	 * 1. If voltage is increasing:
	 * - safe frequency target matches the lowest - old - frequency
	 * - DVFS settings are still old
	 * - Voltage already increased to new level by volt, but maximum
	 *   detection limit assures PLL output remains under F/V curve
	 *
	 * 2. If voltage is decreasing:
	 * - safe frequency target matches the lowest - new - frequency
	 * - DVFS settings are still old
	 * - Voltage is also old, it will be lowered by volt afterwards
	 *
	 * Interim step can be skipped if old frequency is below safe minimum,
	 * i.e., it is low enough to be safe at any voltage in operating range
	 * with zero DVFS coefficient.
	 */
	cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc);
	if (cur_freq > clk->safe_fmax_vmin) {
		struct gk20a_pll pll_safe;

		if (clk->uv < clk->new_uv)
			/* voltage will rise: safe frequency is current one */
			pll_safe = clk->base.pll;
		else
			/* voltage will drop: safe frequency is new one */
			pll_safe = clk->new_pll;

		gm20b_dvfs_calc_safe_pll(clk, &pll_safe);
		ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe);
		if (ret)
			return ret;
	}

	/*
	 * DVFS detection settings transition:
	 * - Set DVFS coefficient zero
	 * - Set calibration level to new voltage
	 * - Set DVFS coefficient to match new voltage
	 */
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff);
	gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs);

prog:
	clk->uv = clk->new_uv;
	clk->dvfs = clk->new_dvfs;
	clk->base.pll = clk->new_pll;

	return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll);
}

static struct nvkm_pstate
gm20b_pstates[] = {
	{
		.base = {
			.domain[nv_clk_src_gpc] = 76800,
			.voltage = 0,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 153600,
			.voltage = 1,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 230400,
			.voltage = 2,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 307200,
			.voltage = 3,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 384000,
			.voltage = 4,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 460800,
			.voltage = 5,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 537600,
			.voltage = 6,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 614400,
			.voltage = 7,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 691200,
			.voltage = 8,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 768000,
			.voltage = 9,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 844800,
			.voltage = 10,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 921600,
			.voltage = 11,
		},
	},
	{
		.base = {
			.domain[nv_clk_src_gpc] = 998400,
			.voltage = 12,
		},
	},
};
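
/*
 * The gpc frequencies above are in kHz and ascend in 76.8 MHz steps, with
 * one voltage id per entry.
 */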

static void
gm20b_clk_fini(struct nvkm_clk *base)
{
	struct nvkm_device *device = base->subdev.device;
	struct gm20b_clk *clk = gm20b_clk(base);

	/* slide to VCO min */
	if (gk20a_pllg_is_enabled(&clk->base)) {
		struct gk20a_pll pll;
		u32 n_lo;

		gk20a_pllg_read_mnp(&clk->base, &pll);
		n_lo = gk20a_pllg_n_lo(&clk->base, &pll);
		gm20b_pllg_slide(clk, n_lo);
	}

	gm20b_pllg_disable(clk);

	/* set IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1);
}

static int
gm20b_clk_init_dvfs(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_device *device = subdev->device;
	bool fused = clk->uvdet_offs && clk->uvdet_slope;
	static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */
	u32 data;
	int ret;

	/* Enable NA DVFS */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT,
		  GPCPLL_DVFS1_EN_DFS_BIT);

	/* Set VCO_CTRL */
	if (clk->dvfs_params->vco_ctrl)
		nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK,
		      clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT);

	if (fused) {
		/* Start internal calibration, but ignore results */
		nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

		/* got uvdet parameters from fuse, skip calibration */
		goto calibrated;
	}

	/*
	 * If calibration parameters are not fused, start internal calibration,
	 * wait for completion, and use results along with default slope to
	 * calculate ADC offset during boot.
	 */
	nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT,
			  GPCPLL_DVFS1_EN_DFS_CAL_BIT);

	/* Wait for internal calibration done (spec < 2us). */
	ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT,
			     GPCPLL_DVFS1_DFS_CAL_DONE_BIT);
	if (ret < 0) {
		nvkm_error(subdev, "GPCPLL calibration timeout\n");
		return -ETIMEDOUT;
	}

	data = nvkm_rd32(device, GPCPLL_CFG3) >>
			 GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT;
	data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH);

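	/*
	 * E.g. (illustrative): a current voltage of 900000 uV and a
	 * calibration readout of 12 give uvdet_offs = 900000 - 12 * 10000 =
	 * 780000 uV.
	 */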
	clk->uvdet_slope = ADC_SLOPE_UV;
	clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV;

	nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n",
		   clk->uvdet_offs, clk->uvdet_slope);

calibrated:
	/* Compute and apply initial DVFS parameters */
	gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs);
	gm20b_dvfs_program_coeff(clk, 0);
	gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal);
	gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff);
	gm20b_dvfs_program_dfs_detection(clk, &clk->dvfs);

	return 0;
}

/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */
static const struct nvkm_clk_func gm20b_clk;

static int
gm20b_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;
	u32 data;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);
	udelay(5);

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	/* Set the global bypass control to VCO */
	nvkm_mask(device, BYPASSCTRL_SYS,
	       MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT,
	       0);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */
	data = nvkm_rd32(device, 0x021944);
	if (!(data & 0x3)) {
		data |= 0x2;
		nvkm_wr32(device, 0x021944, data);

		data = nvkm_rd32(device, 0x021948);
		data |= 0x1;
		nvkm_wr32(device, 0x021948, data);
	}

	/* Disable idle slow down */
	nvkm_mask(device, 0x20160, 0x003f0000, 0x0);

	/* speedo >= 1? */
	if (clk->base.func == &gm20b_clk) {
		struct gm20b_clk *_clk = gm20b_clk(base);
		struct nvkm_volt *volt = device->volt;

		/* Get current voltage */
		_clk->uv = nvkm_volt_get(volt);

		/* Initialize DVFS */
		ret = gm20b_clk_init_dvfs(_clk);
		if (ret)
			return ret;
	}

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}

static const struct nvkm_clk_func
gm20b_clk_speedo0 = {
	.init = gm20b_clk_init,
	.fini = gk20a_clk_fini,
	.read = gk20a_clk_read,
	.calc = gk20a_clk_calc,
	.prog = gk20a_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	/* Speedo 0 only supports 12 voltages */
	.nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};

static const struct nvkm_clk_func
gm20b_clk = {
	.init = gm20b_clk_init,
	.fini = gm20b_clk_fini,
	.read = gk20a_clk_read,
	.calc = gm20b_clk_calc,
	.prog = gm20b_clk_prog,
	.tidy = gk20a_clk_tidy,
	.pstates = gm20b_pstates,
	.nr_pstates = ARRAY_SIZE(gm20b_pstates),
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV },
		{ nv_clk_src_max },
	},
};

static int
gm20b_clk_new_speedo0(struct nvkm_device *device, int index,
		      struct nvkm_clk **pclk)
{
	struct gk20a_clk *clk;
	int ret;

	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base;

	ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0,
			     &gm20b_pllg_params, clk);

	clk->pl_to_div = pl_to_div;
	clk->div_to_pl = div_to_pl;

	return ret;
}

/* FUSE register */
#define FUSE_RESERVED_CALIB0	0x204
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT	0
#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH	4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT	4
#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH	10
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT		14
#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH		10
#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT		24
#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH		6
#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT		30
#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH		2

static int
gm20b_clk_init_fused_params(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	u32 val = 0;
	u32 rev = 0;

#if IS_ENABLED(CONFIG_ARCH_TEGRA)
	tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val);
	rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) &
	      MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH);
#endif

	/* No fused parameters, we will calibrate later */
	if (rev == 0)
		return -EINVAL;

	/* Integer part in mV + fractional part in uV */
	clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH));

	/* Integer part in mV + fractional part in 100uV */
	clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) &
			MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 +
			((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) &
			 MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100;
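	/*
	 * Illustrative decoding (assumed fuse values, not from the original
	 * source): SLOPE_INT = 9 and SLOPE_FRAC = 500 yield uvdet_slope =
	 * 9 * 1000 + 500 = 9500 uV per step; INTERCEPT_INT = 780 and
	 * INTERCEPT_FRAC = 5 yield uvdet_offs = 780 * 1000 + 5 * 100 =
	 * 780500 uV.
	 */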

	nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n",
		   clk->uvdet_slope, clk->uvdet_offs);
	return 0;
}

static int
gm20b_clk_init_safe_fmax(struct gm20b_clk *clk)
{
	struct nvkm_subdev *subdev = &clk->base.base.subdev;
	struct nvkm_volt *volt = subdev->device->volt;
	struct nvkm_pstate *pstates = clk->base.base.func->pstates;
	int nr_pstates = clk->base.base.func->nr_pstates;
	int vmin, id = 0;
	u32 fmax = 0;
	int i;

	/* find lowest voltage we can use */
	vmin = volt->vid[0].uv;
	for (i = 1; i < volt->vid_nr; i++) {
		if (volt->vid[i].uv <= vmin) {
			vmin = volt->vid[i].uv;
			id = volt->vid[i].vid;
		}
	}

	/* find max frequency at this voltage */
	for (i = 0; i < nr_pstates; i++)
		if (pstates[i].base.voltage == id)
			fmax = max(fmax,
				   pstates[i].base.domain[nv_clk_src_gpc]);

	if (!fmax) {
		nvkm_error(subdev, "failed to evaluate safe fmax\n");
		return -EINVAL;
	}

	/* we are safe at 90% of the max frequency */
	clk->safe_fmax_vmin = fmax * (100 - 10) / 100;
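	/*
	 * E.g. (illustrative): if the lowest usable vid maps to the
	 * 460800 kHz pstate, safe_fmax_vmin = 460800 * 90 / 100 =
	 * 414720 kHz.
	 */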
	nvkm_debug(subdev, "safe fmax @ vmin = %u kHz\n", clk->safe_fmax_vmin);

	return 0;
}

int
gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct nvkm_device_tegra *tdev = device->func->tegra(device);
	struct gm20b_clk *clk;
	struct nvkm_subdev *subdev;
	struct gk20a_clk_pllg_params *clk_params;
	int ret;

	/* Speedo 0 GPUs cannot use noise-aware PLL */
	if (tdev->gpu_speedo_id == 0)
		return gm20b_clk_new_speedo0(device, index, pclk);

	/* Speedo >= 1, use NAPLL */
	clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL);
	if (!clk)
		return -ENOMEM;
	*pclk = &clk->base.base;
	subdev = &clk->base.base.subdev;

	/* duplicate the clock parameters since we will patch them below */
	clk_params = (void *) (clk + 1);
	*clk_params = gm20b_pllg_params;
	ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params,
			     &clk->base);
	if (ret)
		return ret;

	/*
	 * NAPLL can only work with max_u, clamp the m range so
	 * gk20a_pllg_calc_mnp always uses it
	 */
	clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u,
						(clk->base.parent_rate / KHZ));
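	/*
	 * E.g. (illustrative): a 38.4 MHz parent gives max_m = min_m =
	 * DIV_ROUND_UP(38400, 38400) = 1.
	 */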
	if (clk_params->max_m == 0) {
		nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n");
		kfree(clk);
		return gm20b_clk_new_speedo0(device, index, pclk);
	}

	clk->base.pl_to_div = pl_to_div;
	clk->base.div_to_pl = div_to_pl;

	clk->dvfs_params = &gm20b_dvfs_params;

	ret = gm20b_clk_init_fused_params(clk);
	/*
	 * we will calibrate during init - should never happen on
	 * prod parts
	 */
	if (ret)
		nvkm_warn(subdev, "no fused calibration parameters\n");

	ret = gm20b_clk_init_safe_fmax(clk);
	if (ret)
		return ret;

	return 0;
}