xref: /netbsd-src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/clk/nouveau_nvkm_subdev_clk_mcp77.c (revision 41ec02673d281bbb3d38e6c78504ce6e30c228c1)
1 /*	$NetBSD: nouveau_nvkm_subdev_clk_mcp77.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $	*/
2 
3 /*
4  * Copyright 2012 Red Hat Inc.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the "Software"),
8  * to deal in the Software without restriction, including without limitation
9  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10  * and/or sell copies of the Software, and to permit persons to whom the
11  * Software is furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22  * OTHER DEALINGS IN THE SOFTWARE.
23  *
24  * Authors: Ben Skeggs
25  */
26 #include <sys/cdefs.h>
27 __KERNEL_RCSID(0, "$NetBSD: nouveau_nvkm_subdev_clk_mcp77.c,v 1.3 2021/12/18 23:45:39 riastradh Exp $");
28 
29 #define mcp77_clk(p) container_of((p), struct mcp77_clk, base)
30 #include "gt215.h"
31 #include "pll.h"
32 
33 #include <subdev/bios.h>
34 #include <subdev/bios/pll.h>
35 #include <subdev/timer.h>
36 
/*
 * Software state for the MCP77/MCP79 clock subdev.  mcp77_clk_calc
 * computes a reclocking strategy into these fields and mcp77_clk_prog
 * later writes them to the hardware.
 */
struct mcp77_clk {
	struct nvkm_clk base;
	/* Chosen sources for the core (c), shader (s) and vdec (v) domains. */
	enum nv_clk_src csrc, ssrc, vsrc;
	/* Divider/control fields, pre-shifted into bits 16+ of the ctrl regs. */
	u32 cctrl, sctrl;
	/* PLL coefficients, (N << 8) | M. */
	u32 ccoef, scoef;
	/* PLL post-divider register values.  NOTE(review): spost is never
	 * assigned in this file (stays 0 from kzalloc) but is still written
	 * to 0x4070 by mcp77_clk_prog — confirm against upstream. */
	u32 cpost, spost;
	/* vdec clock divider (register 0x4600). */
	u32 vdiv;
};
45 
46 static u32
read_div(struct mcp77_clk * clk)47 read_div(struct mcp77_clk *clk)
48 {
49 	struct nvkm_device *device = clk->base.subdev.device;
50 	return nvkm_rd32(device, 0x004600);
51 }
52 
53 static u32
read_pll(struct mcp77_clk * clk,u32 base)54 read_pll(struct mcp77_clk *clk, u32 base)
55 {
56 	struct nvkm_device *device = clk->base.subdev.device;
57 	u32 ctrl = nvkm_rd32(device, base + 0);
58 	u32 coef = nvkm_rd32(device, base + 4);
59 	u32 ref = nvkm_clk_read(&clk->base, nv_clk_src_href);
60 	u32 post_div = 0;
61 	u32 clock = 0;
62 	int N1, M1;
63 
64 	switch (base){
65 	case 0x4020:
66 		post_div = 1 << ((nvkm_rd32(device, 0x4070) & 0x000f0000) >> 16);
67 		break;
68 	case 0x4028:
69 		post_div = (nvkm_rd32(device, 0x4040) & 0x000f0000) >> 16;
70 		break;
71 	default:
72 		break;
73 	}
74 
75 	N1 = (coef & 0x0000ff00) >> 8;
76 	M1 = (coef & 0x000000ff);
77 	if ((ctrl & 0x80000000) && M1) {
78 		clock = ref * N1 / M1;
79 		clock = clock / post_div;
80 	}
81 
82 	return clock;
83 }
84 
/*
 * mcp77_clk_read: report the current frequency of clock domain "src"
 * by decoding the clock source mux register 0x00c054 ("mast") and the
 * relevant PLL/divider registers.  Units match nv_clk_src_href ==
 * 100000 (presumably kHz — confirm against callers).  Returns 0 for
 * muxed-off or unknown sources.
 */
static int
mcp77_clk_read(struct nvkm_clk *base, enum nv_clk_src src)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mast = nvkm_rd32(device, 0x00c054);
	u32 P = 0;

	switch (src) {
	case nv_clk_src_crystal:
		return device->crystal;
	case nv_clk_src_href:
		return 100000; /* PCIE reference clock */
	case nv_clk_src_hclkm4:
		/* href * 4 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 4;
	case nv_clk_src_hclkm2d3:
		/* href * 2 / 3 */
		return nvkm_clk_read(&clk->base, nv_clk_src_href) * 2 / 3;
	case nv_clk_src_host:
		/* mast bits 19:18 select the host clock source. */
		switch (mast & 0x000c0000) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		case 0x00040000: break;
		case 0x00080000: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x000c0000: return nvkm_clk_read(&clk->base, nv_clk_src_cclk);
		}
		break;
	case nv_clk_src_core:
		/* Post-shift from the core PLL control register. */
		P = (nvkm_rd32(device, 0x004028) & 0x00070000) >> 16;

		/* mast bits 1:0 select the core clock source. */
		switch (mast & 0x00000003) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000001: return 0;
		case 0x00000002: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4) >> P;
		case 0x00000003: return read_pll(clk, 0x004028) >> P;
		}
		break;
	case nv_clk_src_cclk:
		/* cclk follows core unless both 0x03000000 and 0x200 are set. */
		if ((mast & 0x03000000) != 0x03000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		if ((mast & 0x00000200) == 0x00000000)
			return nvkm_clk_read(&clk->base, nv_clk_src_core);

		/* mast bits 11:10 select an href-derived cclk source. */
		switch (mast & 0x00000c00) {
		case 0x00000000: return nvkm_clk_read(&clk->base, nv_clk_src_href);
		case 0x00000400: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm4);
		case 0x00000800: return nvkm_clk_read(&clk->base, nv_clk_src_hclkm2d3);
		default: return 0;
		}
	case nv_clk_src_shader:
		/* Post-shift from the shader PLL control register. */
		P = (nvkm_rd32(device, 0x004020) & 0x00070000) >> 16;
		/* mast bits 5:4 select the shader clock source. */
		switch (mast & 0x00000030) {
		case 0x00000000:
			if (mast & 0x00000040)
				return nvkm_clk_read(&clk->base, nv_clk_src_href) >> P;
			return nvkm_clk_read(&clk->base, nv_clk_src_crystal) >> P;
		case 0x00000010: break;
		case 0x00000020: return read_pll(clk, 0x004028) >> P;
		case 0x00000030: return read_pll(clk, 0x004020) >> P;
		}
		break;
	case nv_clk_src_mem:
		/* Memory clock is not readable on this chipset. */
		return 0;
		break;
	case nv_clk_src_vdec:
		/* Divider from register 0x4600 (see read_div). */
		P = (read_div(clk) & 0x00000700) >> 8;

		/* mast bit 22: vdec runs from core, otherwise a fixed 500MHz. */
		switch (mast & 0x00400000) {
		case 0x00400000:
			return nvkm_clk_read(&clk->base, nv_clk_src_core) >> P;
			break;
		default:
			return 500000 >> P;
			break;
		}
		break;
	default:
		break;
	}

	nvkm_debug(subdev, "unknown clock source %d %08x\n", src, mast);
	return 0;
}
168 
169 static u32
calc_pll(struct mcp77_clk * clk,u32 reg,u32 clock,int * N,int * M,int * P)170 calc_pll(struct mcp77_clk *clk, u32 reg,
171 	 u32 clock, int *N, int *M, int *P)
172 {
173 	struct nvkm_subdev *subdev = &clk->base.subdev;
174 	struct nvbios_pll pll;
175 	int ret;
176 
177 	ret = nvbios_pll_parse(subdev->device->bios, reg, &pll);
178 	if (ret)
179 		return 0;
180 
181 	pll.vco2.max_freq = 0;
182 	pll.refclk = nvkm_clk_read(&clk->base, nv_clk_src_href);
183 	if (!pll.refclk)
184 		return 0;
185 
186 	return nv04_pll_calc(subdev, &pll, clock, N, M, NULL, NULL, P);
187 }
188 
189 static inline u32
calc_P(u32 src,u32 target,int * div)190 calc_P(u32 src, u32 target, int *div)
191 {
192 	u32 clk0 = src, clk1 = src;
193 	for (*div = 0; *div <= 7; (*div)++) {
194 		if (clk0 <= target) {
195 			clk1 = clk0 << (*div ? 1 : 0);
196 			break;
197 		}
198 		clk0 >>= 1;
199 	}
200 
201 	if (target - clk0 <= clk1 - target)
202 		return clk0;
203 	(*div)--;
204 	return clk1;
205 }
206 
207 static int
mcp77_clk_calc(struct nvkm_clk * base,struct nvkm_cstate * cstate)208 mcp77_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate)
209 {
210 	struct mcp77_clk *clk = mcp77_clk(base);
211 	const int shader = cstate->domain[nv_clk_src_shader];
212 	const int core = cstate->domain[nv_clk_src_core];
213 	const int vdec = cstate->domain[nv_clk_src_vdec];
214 	struct nvkm_subdev *subdev = &clk->base.subdev;
215 	u32 out = 0, clock = 0;
216 	int N, M, P1, P2 = 0;
217 	int divs = 0;
218 
219 	/* cclk: find suitable source, disable PLL if we can */
220 	if (core < nvkm_clk_read(&clk->base, nv_clk_src_hclkm4))
221 		out = calc_P(nvkm_clk_read(&clk->base, nv_clk_src_hclkm4), core, &divs);
222 
223 	/* Calculate clock * 2, so shader clock can use it too */
224 	clock = calc_pll(clk, 0x4028, (core << 1), &N, &M, &P1);
225 
226 	if (abs(core - out) <= abs(core - (clock >> 1))) {
227 		clk->csrc = nv_clk_src_hclkm4;
228 		clk->cctrl = divs << 16;
229 	} else {
230 		/* NVCTRL is actually used _after_ NVPOST, and after what we
231 		 * call NVPLL. To make matters worse, NVPOST is an integer
232 		 * divider instead of a right-shift number. */
233 		if(P1 > 2) {
234 			P2 = P1 - 2;
235 			P1 = 2;
236 		}
237 
238 		clk->csrc = nv_clk_src_core;
239 		clk->ccoef = (N << 8) | M;
240 
241 		clk->cctrl = (P2 + 1) << 16;
242 		clk->cpost = (1 << P1) << 16;
243 	}
244 
245 	/* sclk: nvpll + divisor, href or spll */
246 	out = 0;
247 	if (shader == nvkm_clk_read(&clk->base, nv_clk_src_href)) {
248 		clk->ssrc = nv_clk_src_href;
249 	} else {
250 		clock = calc_pll(clk, 0x4020, shader, &N, &M, &P1);
251 		if (clk->csrc == nv_clk_src_core)
252 			out = calc_P((core << 1), shader, &divs);
253 
254 		if (abs(shader - out) <=
255 		    abs(shader - clock) &&
256 		   (divs + P2) <= 7) {
257 			clk->ssrc = nv_clk_src_core;
258 			clk->sctrl = (divs + P2) << 16;
259 		} else {
260 			clk->ssrc = nv_clk_src_shader;
261 			clk->scoef = (N << 8) | M;
262 			clk->sctrl = P1 << 16;
263 		}
264 	}
265 
266 	/* vclk */
267 	out = calc_P(core, vdec, &divs);
268 	clock = calc_P(500000, vdec, &P1);
269 	if(abs(vdec - out) <= abs(vdec - clock)) {
270 		clk->vsrc = nv_clk_src_cclk;
271 		clk->vdiv = divs << 16;
272 	} else {
273 		clk->vsrc = nv_clk_src_vdec;
274 		clk->vdiv = P1 << 16;
275 	}
276 
277 	/* Print strategy! */
278 	nvkm_debug(subdev, "nvpll: %08x %08x %08x\n",
279 		   clk->ccoef, clk->cpost, clk->cctrl);
280 	nvkm_debug(subdev, " spll: %08x %08x %08x\n",
281 		   clk->scoef, clk->spost, clk->sctrl);
282 	nvkm_debug(subdev, " vdiv: %08x\n", clk->vdiv);
283 	if (clk->csrc == nv_clk_src_hclkm4)
284 		nvkm_debug(subdev, "core: hrefm4\n");
285 	else
286 		nvkm_debug(subdev, "core: nvpll\n");
287 
288 	if (clk->ssrc == nv_clk_src_hclkm4)
289 		nvkm_debug(subdev, "shader: hrefm4\n");
290 	else if (clk->ssrc == nv_clk_src_core)
291 		nvkm_debug(subdev, "shader: nvpll\n");
292 	else
293 		nvkm_debug(subdev, "shader: spll\n");
294 
295 	if (clk->vsrc == nv_clk_src_hclkm4)
296 		nvkm_debug(subdev, "vdec: 500MHz\n");
297 	else
298 		nvkm_debug(subdev, "vdec: core\n");
299 
300 	return 0;
301 }
302 
/*
 * mcp77_clk_prog: write the strategy computed by mcp77_clk_calc to the
 * hardware.  The sequence is: pause the engines (gt215_clk_pre), mux
 * everything to the safe href-derived clocks, program the PLLs and
 * dividers, wait for the PLLs to lock, then switch the mux register
 * 0xc054 to the new sources.  Returns 0 on success or a negative errno
 * from gt215_clk_pre.
 */
static int
mcp77_clk_prog(struct nvkm_clk *base)
{
	struct mcp77_clk *clk = mcp77_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	u32 pllmask = 0, mast;
	unsigned long flags;
	unsigned long *f = &flags;
	int ret = 0;

	ret = gt215_clk_pre(&clk->base, f);
	if (ret)
		goto out;

	/* First switch to safe clocks: href */
	mast = nvkm_mask(device, 0xc054, 0x03400e70, 0x03400640);
	mast &= ~0x00400e73;
	mast |= 0x03000000;

	/* Core clock: divider off hclkm4, or the core PLL at 0x4028. */
	switch (clk->csrc) {
	case nv_clk_src_hclkm4:
		nvkm_mask(device, 0x4028, 0x00070000, clk->cctrl);
		mast |= 0x00000002;
		break;
	case nv_clk_src_core:
		nvkm_wr32(device, 0x402c, clk->ccoef);
		nvkm_wr32(device, 0x4028, 0x80000000 | clk->cctrl);
		nvkm_wr32(device, 0x4040, clk->cpost);
		pllmask |= (0x3 << 8);	/* expect core PLL lock bits */
		mast |= 0x00000003;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown core clock\n");
		goto resume;
	}

	/* Shader clock: href, divided core PLL, or its own PLL at 0x4020. */
	switch (clk->ssrc) {
	case nv_clk_src_href:
		nvkm_mask(device, 0x4020, 0x00070000, 0x00000000);
		/* mast |= 0x00000000; */
		break;
	case nv_clk_src_core:
		nvkm_mask(device, 0x4020, 0x00070000, clk->sctrl);
		mast |= 0x00000020;
		break;
	case nv_clk_src_shader:
		nvkm_wr32(device, 0x4024, clk->scoef);
		nvkm_wr32(device, 0x4020, 0x80000000 | clk->sctrl);
		nvkm_wr32(device, 0x4070, clk->spost);
		pllmask |= (0x3 << 12);	/* expect shader PLL lock bits */
		mast |= 0x00000030;
		break;
	default:
		nvkm_warn(subdev, "Reclocking failed: unknown sclk clock\n");
		goto resume;
	}

	/* Wait up to 2ms for all enabled PLLs to report lock in 0x4080. */
	if (nvkm_msec(device, 2000,
		u32 tmp = nvkm_rd32(device, 0x004080) & pllmask;
		if (tmp == pllmask)
			break;
	) < 0)
		goto resume;

	/* vdec divider; the cclk case additionally selects core as source. */
	switch (clk->vsrc) {
	case nv_clk_src_cclk:
		mast |= 0x00400000;
		/* fall through */
	default:
		nvkm_wr32(device, 0x4600, clk->vdiv);
	}

	/* Finally switch the muxes to the newly programmed sources. */
	nvkm_wr32(device, 0xc054, mast);

resume:
	/* Disable some PLLs and dividers when unused */
	if (clk->csrc != nv_clk_src_core) {
		nvkm_wr32(device, 0x4040, 0x00000000);
		nvkm_mask(device, 0x4028, 0x80000000, 0x00000000);
	}

	if (clk->ssrc != nv_clk_src_shader) {
		nvkm_wr32(device, 0x4070, 0x00000000);
		nvkm_mask(device, 0x4020, 0x80000000, 0x00000000);
	}

out:
	/* -EBUSY means gt215_clk_pre never took the lock: pass NULL so
	 * gt215_clk_post does not try to release it. */
	if (ret == -EBUSY)
		f = NULL;

	gt215_clk_post(&clk->base, f);
	return ret;
}
397 
/*
 * mcp77_clk_tidy: post-reclock cleanup hook; nothing to do on this
 * chipset (mcp77_clk_prog already disables unused PLLs itself).
 */
static void
mcp77_clk_tidy(struct nvkm_clk *base)
{
}
402 
/*
 * Clock subdev method table for MCP77/MCP79, with the list of clock
 * domains exposed to the core clock code (mult/div 1000 entries are
 * the user-visible, adjustable domains).
 */
static const struct nvkm_clk_func
mcp77_clk = {
	.read = mcp77_clk_read,
	.calc = mcp77_clk_calc,
	.prog = mcp77_clk_prog,
	.tidy = mcp77_clk_tidy,
	.domains = {
		{ nv_clk_src_crystal, 0xff },
		{ nv_clk_src_href   , 0xff },
		{ nv_clk_src_core   , 0xff, 0, "core", 1000 },
		{ nv_clk_src_shader , 0xff, 0, "shader", 1000 },
		{ nv_clk_src_vdec   , 0xff, 0, "vdec", 1000 },
		{ nv_clk_src_max }
	}
};
418 
419 int
mcp77_clk_new(struct nvkm_device * device,int index,struct nvkm_clk ** pclk)420 mcp77_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk)
421 {
422 	struct mcp77_clk *clk;
423 
424 	if (!(clk = kzalloc(sizeof(*clk), GFP_KERNEL)))
425 		return -ENOMEM;
426 	*pclk = &clk->base;
427 
428 	return nvkm_clk_ctor(&mcp77_clk, device, index, true, &clk->base);
429 }
430