/* $NetBSD: cpufreq_dt.c,v 1.19 2021/02/22 06:21:35 ryo Exp $ */

/*-
 * Copyright (c) 2015-2017 Jared McNeill <jmcneill@invisible.ca>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpufreq_dt.c,v 1.19 2021/02/22 06:21:35 ryo Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/kmem.h>
#include <sys/bus.h>
#include <sys/atomic.h>
#include <sys/xcall.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/once.h>
#include <sys/cpu.h>

#include <dev/fdt/fdtvar.h>

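/*
 * Registry of "operating-points-v2" tables that have already been
 * claimed, so that a table marked "opp-shared" is only set up once
 * even when several CPU nodes reference it.
 */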
struct cpufreq_dt_table {
	int			phandle;
	TAILQ_ENTRY(cpufreq_dt_table) next;
};

static TAILQ_HEAD(, cpufreq_dt_table) cpufreq_dt_tables =
    TAILQ_HEAD_INITIALIZER(cpufreq_dt_tables);
static kmutex_t cpufreq_dt_tables_lock;

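/*
 * One operating performance point (OPP): a CPU frequency in kHz, the
 * matching supply voltage in uV, and the transition latency in ns.
 */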
struct cpufreq_dt_opp {
	u_int			freq_khz;
	u_int			voltage_uv;
	u_int			latency_ns;
};

struct cpufreq_dt_softc {
	device_t		sc_dev;
	int			sc_phandle;
	struct clk		*sc_clk;
	struct fdtbus_regulator	*sc_supply;

	struct cpufreq_dt_opp	*sc_opp;
	ssize_t			sc_nopp;

	u_int			sc_freq_target;
	bool			sc_freq_throttle;

	u_int			sc_busy;

	char			*sc_freq_available;
	int			sc_node_target;
	int			sc_node_current;
	int			sc_node_available;

	struct cpufreq_dt_table	sc_table;
};

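/*
 * Cross-call handler: refresh the cached cycle counter frequency of
 * the CPU this runs on after the clock rate has changed.
 */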
static void
cpufreq_dt_change_cb(void *arg1, void *arg2)
{
	struct cpufreq_dt_softc * const sc = arg1;
	struct cpu_info *ci = curcpu();

	ci->ci_data.cpu_cc_freq = clk_get_rate(sc->sc_clk);
}

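/*
 * Switch to the OPP matching freq_khz. When raising the frequency the
 * supply voltage is increased first; when lowering it, the voltage is
 * dropped only after the slower clock rate is in effect, so the CPU is
 * never clocked faster than its current voltage allows. After a rate
 * change, wait out the OPP's transition latency, then broadcast the
 * new rate to all CPUs and raise a PMF speed-change event.
 */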
static int
cpufreq_dt_set_rate(struct cpufreq_dt_softc *sc, u_int freq_khz)
{
	struct cpufreq_dt_opp *opp = NULL;
	u_int old_rate, new_rate, old_uv, new_uv;
	uint64_t xc;
	int error;
	ssize_t n;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz == freq_khz) {
			opp = &sc->sc_opp[n];
			break;
		}
	if (opp == NULL)
		return EINVAL;

	old_rate = clk_get_rate(sc->sc_clk);
	new_rate = freq_khz * 1000;
	new_uv = opp->voltage_uv;

	if (old_rate == new_rate)
		return 0;

	if (sc->sc_supply != NULL) {
		error = fdtbus_regulator_get_voltage(sc->sc_supply, &old_uv);
		if (error != 0)
			return error;

		if (new_uv > old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	error = clk_set_rate(sc->sc_clk, new_rate);
	if (error != 0)
		return error;

	const u_int latency_us = howmany(opp->latency_ns, 1000);
	if (latency_us > 0)
		delay(latency_us);

	if (sc->sc_supply != NULL) {
		if (new_uv < old_uv) {
			error = fdtbus_regulator_set_voltage(sc->sc_supply,
			    new_uv, new_uv);
			if (error != 0)
				return error;
		}
	}

	if (error == 0) {
		xc = xc_broadcast(0, cpufreq_dt_change_cb, sc, NULL);
		xc_wait(xc);

		pmf_event_inject(NULL, PMFE_SPEED_CHANGED);
	}

	return 0;
}

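/*
 * PMFE_THROTTLE_ENABLE handler: drop to the last OPP in the table
 * (the slowest rate, as sc_opp[] is kept fastest first), remembering
 * the current target frequency so it can be restored later.
 */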
static void
cpufreq_dt_throttle_enable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (sc->sc_freq_throttle)
		return;

	const u_int freq_khz = sc->sc_opp[sc->sc_nopp - 1].freq_khz;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle enabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = true;
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
	}

	atomic_dec_uint(&sc->sc_busy);
}

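/* PMFE_THROTTLE_DISABLE handler: return to the saved target frequency. */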
static void
cpufreq_dt_throttle_disable(device_t dev)
{
	struct cpufreq_dt_softc * const sc = device_private(dev);

	if (!sc->sc_freq_throttle)
		return;

	while (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		kpause("throttle", false, 1, NULL);

	const u_int freq_khz = sc->sc_freq_target * 1000;

	if (cpufreq_dt_set_rate(sc, freq_khz) == 0) {
		aprint_debug_dev(sc->sc_dev, "throttle disabled (%u.%03u MHz)\n",
		    freq_khz / 1000, freq_khz % 1000);
		sc->sc_freq_throttle = false;
	}

	atomic_dec_uint(&sc->sc_busy);
}

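/*
 * Backing routine for the machdep.cpufreq.<cpu>.{target,current}
 * sysctl nodes: "current" reports the clock rate in MHz, while
 * "target" also accepts a new frequency in MHz and switches to the
 * matching OPP (deferred while throttled).
 */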
static int
cpufreq_dt_sysctl_helper(SYSCTLFN_ARGS)
{
	struct cpufreq_dt_softc * const sc = rnode->sysctl_data;
	struct sysctlnode node;
	u_int fq, oldfq = 0;
	int error, n;

	node = *rnode;
	node.sysctl_data = &fq;

	if (rnode->sysctl_num == sc->sc_node_target) {
		if (sc->sc_freq_target == 0)
			sc->sc_freq_target = clk_get_rate(sc->sc_clk) / 1000000;
		fq = sc->sc_freq_target;
	} else
		fq = clk_get_rate(sc->sc_clk) / 1000000;

	if (rnode->sysctl_num == sc->sc_node_target)
		oldfq = fq;

	if (sc->sc_freq_target == 0)
		sc->sc_freq_target = fq;

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	if (fq == oldfq || rnode->sysctl_num != sc->sc_node_target)
		return 0;

	for (n = 0; n < sc->sc_nopp; n++)
		if (sc->sc_opp[n].freq_khz / 1000 == fq)
			break;
	if (n == sc->sc_nopp)
		return EINVAL;

	if (atomic_cas_uint(&sc->sc_busy, 0, 1) != 0)
		return EBUSY;

	sc->sc_freq_target = fq;

	if (sc->sc_freq_throttle)
		error = 0;
	else
		error = cpufreq_dt_set_rate(sc, fq * 1000);

	atomic_dec_uint(&sc->sc_busy);

	return error;
}

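/* Map an MPIDR value (from the CPU node's "reg" property) to its cpu_info. */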
static struct cpu_info *
cpufreq_dt_cpu_lookup(cpuid_t mpidr)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == mpidr)
			return ci;
	}

	return NULL;
}

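/*
 * Create the machdep.cpufreq.<cpu> sysctl tree for this CPU, including
 * a space-separated "available" string listing the supported
 * frequencies in MHz.
 */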
static void
cpufreq_dt_init_sysctl(struct cpufreq_dt_softc *sc)
{
	const struct sysctlnode *node, *cpunode;
	struct sysctllog *cpufreq_log = NULL;
	struct cpu_info *ci;
	bus_addr_t mpidr;
	int error, i;

	if (fdtbus_get_reg(sc->sc_phandle, 0, &mpidr, 0) != 0)
		return;

	ci = cpufreq_dt_cpu_lookup(mpidr);
	if (ci == NULL)
		return;

	sc->sc_freq_available = kmem_zalloc(strlen("XXXX ") * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++) {
		char buf[6];
		snprintf(buf, sizeof(buf), i ? " %u" : "%u", sc->sc_opp[i].freq_khz / 1000);
		strcat(sc->sc_freq_available, buf);
	}

	error = sysctl_createv(&cpufreq_log, 0, NULL, &node,
	    CTLFLAG_PERMANENT, CTLTYPE_NODE, "machdep", NULL,
	    NULL, 0, NULL, 0, CTL_MACHDEP, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &node,
	    0, CTLTYPE_NODE, "cpufreq", NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	error = sysctl_createv(&cpufreq_log, 0, &node, &cpunode,
	    0, CTLTYPE_NODE, cpu_name(ci), NULL,
	    NULL, 0, NULL, 0, CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "target", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_target = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    CTLFLAG_READWRITE, CTLTYPE_INT, "current", NULL,
	    cpufreq_dt_sysctl_helper, 0, (void *)sc, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_current = node->sysctl_num;

	error = sysctl_createv(&cpufreq_log, 0, &cpunode, &node,
	    0, CTLTYPE_STRING, "available", NULL,
	    NULL, 0, sc->sc_freq_available, 0,
	    CTL_CREATE, CTL_EOL);
	if (error)
		goto sysctl_failed;
	sc->sc_node_available = node->sysctl_num;

	return;

sysctl_failed:
	aprint_error_dev(sc->sc_dev, "couldn't create sysctl nodes: %d\n", error);
	sysctl_teardown(&cpufreq_log);
}

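/*
 * Parse the legacy "operating-points" property: a flat array of
 * (frequency-kHz, voltage-uV) cell pairs, 8 bytes per entry.
 */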
static int
cpufreq_dt_parse_opp(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	const u_int *opp;
	int len, i;

	opp = fdtbus_get_prop(phandle, "operating-points", &len);
	if (len < 8)
		return ENXIO;

	sc->sc_nopp = len / 8;
	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	for (i = 0; i < sc->sc_nopp; i++, opp += 2) {
		sc->sc_opp[i].freq_khz = be32toh(opp[0]);
		sc->sc_opp[i].voltage_uv = be32toh(opp[1]);
	}

	return 0;
}

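/*
 * Find the best platform-specific OPP handler for this table by
 * walking the fdt_opps link set and scoring each handler's compatible
 * string against the table node.
 */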
static const struct fdt_opp_info *
cpufreq_dt_lookup_opp_info(const int opp_table)
{
	__link_set_decl(fdt_opps, struct fdt_opp_info);
	struct fdt_opp_info * const *opp;
	const struct fdt_opp_info *best_opp = NULL;
	int match, best_match = 0;

	__link_set_foreach(opp, fdt_opps) {
		const struct device_compatible_entry compat_data[] = {
			{ .compat = (*opp)->opp_compat },
			DEVICE_COMPAT_EOL
		};

		match = of_compatible_match(opp_table, compat_data);
		if (match > best_match) {
			best_match = match;
			best_opp = *opp;
		}
	}

	return best_opp;
}

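/*
 * Generic fallback handler for plain "operating-points-v2" tables:
 * every OPP node is considered usable.
 */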
static bool
cpufreq_dt_opp_v2_supported(const int opp_table, const int opp_node)
{
	return true;
}

FDT_OPP(opp_v2, "operating-points-v2", cpufreq_dt_opp_v2_supported);

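/*
 * Decide whether an individual OPP node may be used: it must have an
 * "okay" status, must not be the suspend OPP, and must be accepted by
 * the platform handler.
 */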
static bool
cpufreq_dt_node_supported(const struct fdt_opp_info *opp_info, const int opp_table, const int opp_node)
{
	if (!fdtbus_status_okay(opp_node))
		return false;
	if (of_hasprop(opp_node, "opp-suspend"))
		return false;

	if (opp_info != NULL)
		return opp_info->opp_supported(opp_table, opp_node);

	return false;
}

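/*
 * Parse an "operating-points-v2" table. The binding lists OPP nodes in
 * ascending frequency order, so the driver fills sc_opp[] in reverse
 * to keep it ordered fastest first.
 */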
static int
cpufreq_dt_parse_opp_v2(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	struct cpufreq_dt_table *table;
	const struct fdt_opp_info *opp_info;
	const u_int *opp_uv;
	uint64_t opp_hz;
	int opp_node, len, i, index;

	const int opp_table = fdtbus_get_phandle(phandle, "operating-points-v2");
	if (opp_table < 0)
		return ENOENT;

	/* If the table is shared, only set up a single instance */
	if (of_hasprop(opp_table, "opp-shared")) {
		TAILQ_FOREACH(table, &cpufreq_dt_tables, next)
			if (table->phandle == opp_table)
				return EEXIST;
		sc->sc_table.phandle = opp_table;
		TAILQ_INSERT_TAIL(&cpufreq_dt_tables, &sc->sc_table, next);
	}

	opp_info = cpufreq_dt_lookup_opp_info(opp_table);

	for (opp_node = OF_child(opp_table); opp_node; opp_node = OF_peer(opp_node)) {
		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
			continue;
		sc->sc_nopp++;
	}

	if (sc->sc_nopp == 0)
		return EINVAL;

	sc->sc_opp = kmem_zalloc(sizeof(*sc->sc_opp) * sc->sc_nopp, KM_SLEEP);
	index = sc->sc_nopp - 1;
	for (opp_node = OF_child(opp_table), i = 0; opp_node; opp_node = OF_peer(opp_node), i++) {
		if (!cpufreq_dt_node_supported(opp_info, opp_table, opp_node))
			continue;
		if (of_getprop_uint64(opp_node, "opp-hz", &opp_hz) != 0)
			return EINVAL;
		opp_uv = fdtbus_get_prop(opp_node, "opp-microvolt", &len);
		if (opp_uv == NULL || len < 1)
			return EINVAL;
		/* Table is in reverse order */
		sc->sc_opp[index].freq_khz = (u_int)(opp_hz / 1000);
		sc->sc_opp[index].voltage_uv = be32toh(opp_uv[0]);
		of_getprop_uint32(opp_node, "clock-latency-ns", &sc->sc_opp[index].latency_ns);
		--index;
	}

	return 0;
}

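/*
 * Acquire the CPU clock and the optional cpu-supply regulator, then
 * parse whichever OPP binding the CPU node carries. EEXIST from the
 * v2 parser means a shared table was already claimed by another CPU,
 * which is not an error worth reporting.
 */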
static int
cpufreq_dt_parse(struct cpufreq_dt_softc *sc)
{
	const int phandle = sc->sc_phandle;
	int error, i;

	if (of_hasprop(phandle, "cpu-supply")) {
		sc->sc_supply = fdtbus_regulator_acquire(phandle, "cpu-supply");
		if (sc->sc_supply == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "couldn't acquire cpu-supply\n");
			return ENXIO;
		}
	}
	sc->sc_clk = fdtbus_clock_get_index(phandle, 0);
	if (sc->sc_clk == NULL) {
		aprint_error_dev(sc->sc_dev, "couldn't acquire clock\n");
		return ENXIO;
	}

	mutex_enter(&cpufreq_dt_tables_lock);
	if (of_hasprop(phandle, "operating-points"))
		error = cpufreq_dt_parse_opp(sc);
	else if (of_hasprop(phandle, "operating-points-v2"))
		error = cpufreq_dt_parse_opp_v2(sc);
	else
		error = EINVAL;
	mutex_exit(&cpufreq_dt_tables_lock);

	if (error) {
		if (error != EEXIST)
			aprint_error_dev(sc->sc_dev,
			    "couldn't parse operating points: %d\n", error);
		return error;
	}

	for (i = 0; i < sc->sc_nopp; i++) {
		aprint_debug_dev(sc->sc_dev, "supported rate: %u.%03u MHz, %u uV\n",
		    sc->sc_opp[i].freq_khz / 1000,
		    sc->sc_opp[i].freq_khz % 1000,
		    sc->sc_opp[i].voltage_uv);
	}

	return 0;
}

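/* Match any CPU node that has a clock and one of the OPP bindings. */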
static int
cpufreq_dt_match(device_t parent, cfdata_t cf, void *aux)
{
	struct fdt_attach_args * const faa = aux;
	const int phandle = faa->faa_phandle;
	bus_addr_t addr;

	if (fdtbus_get_reg(phandle, 0, &addr, NULL) != 0)
		return 0;

	if (!of_hasprop(phandle, "clocks"))
		return 0;

	if (!of_hasprop(phandle, "operating-points") &&
	    !of_hasprop(phandle, "operating-points-v2"))
		return 0;

	return 1;
}

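/*
 * Deferred initialization, run from config_interrupts(): parse the
 * OPPs, hook up the throttle PMF events and the sysctl tree, and
 * start at the fastest supported rate (sc_opp[0]).
 */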
static void
cpufreq_dt_init(device_t self)
{
	struct cpufreq_dt_softc * const sc = device_private(self);
	int error;

	if ((error = cpufreq_dt_parse(sc)) != 0)
		return;

	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_ENABLE, cpufreq_dt_throttle_enable, true);
	pmf_event_register(sc->sc_dev, PMFE_THROTTLE_DISABLE, cpufreq_dt_throttle_disable, true);

	cpufreq_dt_init_sysctl(sc);

	if (sc->sc_nopp > 0) {
		struct cpufreq_dt_opp * const opp = &sc->sc_opp[0];

		aprint_normal_dev(sc->sc_dev, "rate: %u.%03u MHz, %u uV\n",
		    opp->freq_khz / 1000, opp->freq_khz % 1000, opp->voltage_uv);
		cpufreq_dt_set_rate(sc, opp->freq_khz);
	}
}

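/* Initialize the shared-table lock once, on first attach. */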
static int
cpufreq_dt_lock_init(void)
{
	mutex_init(&cpufreq_dt_tables_lock, MUTEX_DEFAULT, IPL_NONE);
	return 0;
}

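/*
 * Attach is minimal: real setup is postponed to cpufreq_dt_init() via
 * config_interrupts(), since the clock and regulator providers this
 * driver depends on may attach later.
 */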
static void
cpufreq_dt_attach(device_t parent, device_t self, void *aux)
{
	static ONCE_DECL(locks);
	struct cpufreq_dt_softc * const sc = device_private(self);
	struct fdt_attach_args * const faa = aux;

	RUN_ONCE(&locks, cpufreq_dt_lock_init);

	sc->sc_dev = self;
	sc->sc_phandle = faa->faa_phandle;

	aprint_naive("\n");
	aprint_normal("\n");

	config_interrupts(self, cpufreq_dt_init);
}

CFATTACH_DECL_NEW(cpufreq_dt, sizeof(struct cpufreq_dt_softc),
    cpufreq_dt_match, cpufreq_dt_attach, NULL, NULL);