/*	$NetBSD: nouveau_nvkm_subdev_clk_gt215.c,v 1.5 2021/12/18 23:45:39 riastradh Exp $	*/

/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 *          Roy Spliet
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_gt215.c,v 1.5 2021/12/18 23:45:39 riastradh Exp $");

#define gt215_clk(p) container_of((p), struct gt215_clk, base)
#include "gt215.h"
#include "pll.h"

#include <engine/fifo.h>
#include <subdev/bios.h>
#include <subdev/bios/pll.h>
#include <subdev/timer.h>

struct gt215_clk {
	struct nvkm_clk base;
	struct gt215_clk_info eng[nv_clk_src_max];
};

static u32 read_clk(struct gt215_clk *, int, bool);
static u32 read_pll(struct gt215_clk *, int, u32);

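/* Read back the rate, in kHz, of the VCO feeding clock source 'idx', as
 * selected by bits 4:5 of its source control register: the crystal or
 * one of the two 0xe8xx PLLs.
 */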
static u32
read_vco(struct gt215_clk *clk, int idx)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl = nvkm_rd32(device, 0x4120 + (idx * 4));

	switch (sctl & 0x00000030) {
	case 0x00000000:
		return device->crystal;
	case 0x00000020:
		return read_pll(clk, 0x41, 0x00e820);
	case 0x00000030:
		return read_pll(clk, 0x42, 0x00e8a0);
	default:
		return 0;
	}
}

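/* Read back the current rate of clock source 'idx' in kHz.  When
 * 'ignore_en' is set, the rate is reported even if the source is
 * currently disabled.
 */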
static u32
read_clk(struct gt215_clk *clk, int idx, bool ignore_en)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 sctl, sdiv, sclk;

	/* refclk for the 0xe8xx plls is a fixed frequency */
	if (idx >= 0x40) {
		if (device->chipset == 0xaf) {
			/* no joke.. seriously.. sigh.. */
			return nvkm_rd32(device, 0x00471c) * 1000;
		}

		return device->crystal;
	}

	sctl = nvkm_rd32(device, 0x4120 + (idx * 4));
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;

	/* out_alt */
	if (sctl & 0x00000400)
		return 108000;

	/* vco_out */
	switch (sctl & 0x00003000) {
	case 0x00000000:
		if (!(sctl & 0x00000200))
			return device->crystal;
		return 0;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		/* vco_enable */
		if (!(sctl & 0x00000001))
			return 0;

		sclk = read_vco(clk, idx);
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}

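/* Read back the current output rate of the PLL at register 'pll' in kHz,
 * honouring its bypass and enable bits.  'idx' names the clock source
 * feeding the PLL (or its bypass path).
 */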
static u32
read_pll(struct gt215_clk *clk, int idx, u32 pll)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 ctrl = nvkm_rd32(device, pll + 0);
	u32 sclk = 0, P = 1, N = 1, M = 1;
	u32 MP;

	if (!(ctrl & 0x00000008)) {
		if (ctrl & 0x00000001) {
			u32 coef = nvkm_rd32(device, pll + 4);
			M = (coef & 0x000000ff) >> 0;
			N = (coef & 0x0000ff00) >> 8;
			P = (coef & 0x003f0000) >> 16;

			/* no post-divider on these..
			 * XXX: it looks more like two post-"dividers" that
			 * cross each other out in the default RPLL config */
			if ((pll & 0x00ff00) == 0x00e800)
				P = 1;

			sclk = read_clk(clk, 0x00 + idx, false);
		}
	} else {
		sclk = read_clk(clk, 0x10 + idx, false);
	}

	MP = M * P;

	if (!MP)
		return 0;

	return sclk * N / MP;
}

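/* nvkm_clk.read: report the current rate of clock domain 'src' in kHz. */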
static int
gt215_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 hsrc;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_core:
	case nv_clk_src_core_intm:
		return read_pll(clk, 0x00, 0x4200);
	case nv_clk_src_shader:
		return read_pll(clk, 0x01, 0x4220);
	case nv_clk_src_mem:
		return read_pll(clk, 0x02, 0x4000);
	case nv_clk_src_disp:
		return read_clk(clk, 0x20, false);
	case nv_clk_src_vdec:
		return read_clk(clk, 0x21, false);
	case nv_clk_src_pmu:
		return read_clk(clk, 0x25, false);
	case nv_clk_src_host:
		hsrc = (nvkm_rd32(device, 0xc040) & 0x30000000) >> 28;
		switch (hsrc) {
		case 0:
			return read_clk(clk, 0x1d, false);
		case 2:
		case 3:
			return 277000;
		default:
			nvkm_error(subdev, "unknown HOST clock source %d\n", hsrc);
			return -EINVAL;
		}
	default:
		nvkm_error(subdev, "invalid clock source %d\n", src);
		return -EINVAL;
	}

	return 0;
}

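/* Work out a divider-only setting for 'khz' on clock source 'idx' and
 * store it in 'info'.  Returns the rate actually achieved in kHz, or
 * -ERANGE if no suitable divider exists.
 */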
static int
gt215_clk_info(struct nvkm_clk *base, int idx, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	u32 oclk, sclk, sdiv;
	s32 diff;

	info->clk = 0;

	switch (khz) {
	case 27000:
		info->clk = 0x00000100;
		return khz;
	case 100000:
		info->clk = 0x00002100;
		return khz;
	case 108000:
		info->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(clk, idx);
		sdiv = min((sclk * 2) / khz, (u32)65);
		oclk = (sclk * 2) / sdiv;
		diff = ((khz + 3000) - oclk);

		/* When imprecise, play it safe and aim for a clock lower than
		 * desired rather than higher */
		if (diff < 0) {
			sdiv++;
			oclk = (sclk * 2) / sdiv;
		}

		/* divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 * XXX: PLLs with refclk 810MHz?  */
		if (sdiv > 4) {
			info->clk = (((sdiv - 2) << 16) | 0x00003100);
			return oclk;
		}

		break;
	}

	return -ERANGE;
}

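/* Work out settings for 'khz' on clock source 'idx', preferring a plain
 * divider and falling back to the PLL at 'pll' when the divider alone
 * cannot get close enough.  The result is stored in 'info'.
 */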
int
gt215_pll_info(struct nvkm_clk *base, int idx, u32 pll, u32 khz,
	       struct gt215_clk_info *info)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvbios_pll limits;
	int P, N, M, diff;
	int ret;

	info->pll = 0;

	/* If we can get within [-2, 3) MHz of a divider, we'll disable the
	 * PLL and use the divider instead. */
	ret = gt215_clk_info(&clk->base, idx, khz, info);
	diff = khz - ret;
	if (!pll || (diff >= -2000 && diff < 3000)) {
		goto out;
	}

	/* Try with PLL */
	ret = nvbios_pll_parse(subdev->device->bios, pll, &limits);
	if (ret)
		return ret;

	ret = gt215_clk_info(&clk->base, idx - 0x10, limits.refclk, info);
	if (ret != limits.refclk)
		return -EINVAL;

	ret = gt215_pll_calc(subdev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		info->pll = (P << 16) | (N << 8) | M;
	}

out:
	info->fb_delay = max(((khz + 7566) / 15133), (u32) 18);
	return ret ? ret : -ERANGE;
}

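/* Fill in clk->eng[dom] for the rate requested by 'cstate'. */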
static int
calc_clk(struct gt215_clk *clk, struct nvkm_cstate *cstate,
	 int idx, u32 pll, int dom)
{
	int ret = gt215_pll_info(&clk->base, idx, pll, cstate->domain[dom],
				 &clk->eng[dom]);
	if (ret >= 0)
		return 0;
	return ret;
}

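/* Work out the host clock: either the fixed 277MHz source, or a divided
 * clock programmed through source 0x1d.
 */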
static int
calc_host(struct gt215_clk *clk, struct nvkm_cstate *cstate)
{
	int ret = 0;
	u32 kHz = cstate->domain[nv_clk_src_host];
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];

	if (kHz == 277000) {
		info->clk = 0;
		info->host_out = NVA3_HOST_277;
		return 0;
	}

	info->host_out = NVA3_HOST_CLK;

	ret = gt215_clk_info(&clk->base, 0x1d, kHz, info);
	if (ret >= 0)
		return 0;

	return ret;
}

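/* Prepare for reclocking: halt the execution engines, wait for pending
 * interrupts to be handled and pause PFIFO.
 */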
int
gt215_clk_pre(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	/* halt and idle execution engines */
	nvkm_mask(device, 0x020060, 0x00070000, 0x00000000);
	nvkm_mask(device, 0x002504, 0x00000001, 0x00000001);
	/* Wait until the interrupt handler is finished */
	if (nvkm_msec(device, 2000,
		if (!nvkm_rd32(device, 0x000100))
			break;
	) < 0)
		return -EBUSY;

	if (fifo)
		nvkm_fifo_pause(fifo, flags);

	if (nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x002504) & 0x00000010)
			break;
	) < 0)
		return -EIO;

	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x00251c) & 0x0000003f;
		if (tmp == 0x0000003f)
			break;
	) < 0)
		return -EIO;

	return 0;
}

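/* Undo gt215_clk_pre(): restart PFIFO and re-enable the engines. */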
void
gt215_clk_post(struct nvkm_clk *clk, unsigned long *flags)
{
	struct nvkm_device *device = clk->subdev.device;
	struct nvkm_fifo *fifo = device->fifo;

	if (fifo && flags)
		nvkm_fifo_start(fifo, flags);

	nvkm_mask(device, 0x002504, 0x00000001, 0x00000000);
	nvkm_mask(device, 0x020060, 0x00070000, 0x00040000);
}

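/* Disable a clock source by clearing its enable bits. */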
static void
disable_clk_src(struct gt215_clk *clk, u32 src)
{
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, src, 0x00000100, 0x00000000);
	nvkm_mask(device, src, 0x00000001, 0x00000000);
}

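/* Switch domain 'dom' to the settings chosen by gt215_pll_info(): either
 * the PLL at 'pll' (referenced from divider source src0), or a plain
 * divider on src1 with the PLL bypassed.  The domain is parked on a
 * non-PLL clock while the PLL is reprogrammed, and left on that bypass
 * path if the PLL fails to lock.
 */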
static void
prog_pll(struct gt215_clk *clk, int idx, u32 pll, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	const u32 src0 = 0x004120 + (idx * 4);
	const u32 src1 = 0x004160 + (idx * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;
	u32 bypass;

	if (info->pll) {
		/* Always start from a non-PLL clock */
		bypass = nvkm_rd32(device, ctrl)  & 0x00000008;
		if (!bypass) {
			nvkm_mask(device, src1, 0x00000101, 0x00000101);
			nvkm_mask(device, ctrl, 0x00000008, 0x00000008);
			udelay(20);
		}

		nvkm_mask(device, src0, 0x003f3141, 0x00000101 | info->clk);
		nvkm_wr32(device, coef, info->pll);
		nvkm_mask(device, ctrl, 0x00000015, 0x00000015);
		nvkm_mask(device, ctrl, 0x00000010, 0x00000000);
		if (nvkm_msec(device, 2000,
			if (nvkm_rd32(device, ctrl) & 0x00020000)
				break;
		) < 0) {
			nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
			nvkm_mask(device, src0, 0x00000101, 0x00000000);
			return;
		}
		nvkm_mask(device, ctrl, 0x00000010, 0x00000010);
		nvkm_mask(device, ctrl, 0x00000008, 0x00000000);
		disable_clk_src(clk, src1);
	} else {
		nvkm_mask(device, src1, 0x003f3141, 0x00000101 | info->clk);
		nvkm_mask(device, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nvkm_mask(device, ctrl, 0x00000001, 0x00000000);
		disable_clk_src(clk, src0);
	}
}

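/* Program the divider-only clock source 'idx' for domain 'dom'. */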
static void
prog_clk(struct gt215_clk *clk, int idx, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	nvkm_mask(device, 0x004120 + (idx * 4), 0x003f3141, 0x00000101 | info->clk);
}

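/* Switch the host clock between the fixed 277MHz source and the
 * programmable source 0x1d, as decided by calc_host().
 */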
static void
prog_host(struct gt215_clk *clk)
{
	struct gt215_clk_info *info = &clk->eng[nv_clk_src_host];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 hsrc = (nvkm_rd32(device, 0xc040));

	switch (info->host_out) {
	case NVA3_HOST_277:
		if ((hsrc & 0x30000000) == 0) {
			nvkm_wr32(device, 0xc040, hsrc | 0x20000000);
			disable_clk_src(clk, 0x4194);
		}
		break;
	case NVA3_HOST_CLK:
		prog_clk(clk, 0x1d, nv_clk_src_host);
		if ((hsrc & 0x30000000) >= 0x20000000) {
			nvkm_wr32(device, 0xc040, hsrc & ~0x30000000);
		}
		break;
	default:
		break;
	}

	/* This seems to be a clock gating factor on idle, always set to 64 */
	nvkm_wr32(device, 0xc044, 0x3e);
}

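/* Program the core (or intermediate core) clock, raising the 0x10002c
 * delay value before the switch when required and lowering it again
 * afterwards.
 */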
static void
prog_core(struct gt215_clk *clk, int dom)
{
	struct gt215_clk_info *info = &clk->eng[dom];
	struct nvkm_device *device = clk->base.subdev.device;
	u32 fb_delay = nvkm_rd32(device, 0x10002c);

	if (fb_delay < info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);

	prog_pll(clk, 0x00, 0x004200, dom);

	if (fb_delay > info->fb_delay)
		nvkm_wr32(device, 0x10002c, info->fb_delay);
}

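/* nvkm_clk.calc: work out settings for every domain of 'cstate', plus an
 * intermediate core clock to run from while the core PLL is reprogrammed.
 */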
static int
gt215_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret;

	if ((ret = calc_clk(clk, cstate, 0x10, 0x4200, nv_clk_src_core)) ||
	    (ret = calc_clk(clk, cstate, 0x11, 0x4220, nv_clk_src_shader)) ||
	    (ret = calc_clk(clk, cstate, 0x20, 0x0000, nv_clk_src_disp)) ||
	    (ret = calc_clk(clk, cstate, 0x21, 0x0000, nv_clk_src_vdec)) ||
	    (ret = calc_host(clk, cstate)))
		return ret;

	/* XXX: Should be reading the highest bit in the VBIOS clock to decide
	 * whether to use a PLL or not... but using a PLL defeats the purpose */
	if (core->pll) {
		ret = gt215_clk_info(&clk->base, 0x10,
				     cstate->domain[nv_clk_src_core_intm],
				     &clk->eng[nv_clk_src_core_intm]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

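/* nvkm_clk.prog: apply the settings computed by gt215_clk_calc(), pausing
 * PFIFO around the switch via gt215_clk_pre()/gt215_clk_post().
 */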
static int
gt215_clk_prog(struct nvkm_clk *base)
{
	struct gt215_clk *clk = gt215_clk(base);
	struct gt215_clk_info *core = &clk->eng[nv_clk_src_core];
	int ret = 0;
	unsigned long flags;
	unsigned long *f = &flags;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	if (core->pll)
		prog_core(clk, nv_clk_src_core_intm);

	prog_core(clk,  nv_clk_src_core);
	prog_pll(clk, 0x01, 0x004220, nv_clk_src_shader);
	prog_clk(clk, 0x20, nv_clk_src_disp);
	prog_clk(clk, 0x21, nv_clk_src_vdec);
	prog_host(clk);

out:
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}

static void
gt215_clk_tidy(struct nvkm_clk *base)
{
}

static const struct nvkm_clk_func
gt215_clk = {
	.read = gt215_clk_read,
	.calc = gt215_clk_calc,
	.prog = gt215_clk_prog,
	.tidy = gt215_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal  , 0xff },
		{ nv_clk_src_core     , 0x00, 0, "core", 1000 },
		{ nv_clk_src_shader   , 0x01, 0, "shader", 1000 },
		{ nv_clk_src_mem      , 0x02, 0, "memory", 1000 },
		{ nv_clk_src_vdec     , 0x03 },
		{ nv_clk_src_disp     , 0x04 },
		{ nv_clk_src_host     , 0x05 },
		{ nv_clk_src_core_intm, 0x06 },
		{ nv_clk_src_max }
	}
};

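/* Construct the clk subdev for GT215-family chipsets. */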
int
gt215_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
{
	struct gt215_clk *clk;

	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
		return -ENOMEM;
	*pclk = &clk->base;

	return nvkm_clk_ctor(&gt215_clk, device, index, true, &clk->base);
}