xref: /netbsd-src/sys/arch/arm/cortex/gtmr.c (revision 122b5006ee1bd67145794b4cde92f4fe4781a5ec)
1 /*	$NetBSD: gtmr.c,v 1.46 2021/10/31 16:23:47 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2012 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: gtmr.c,v 1.46 2021/10/31 16:23:47 skrll Exp $");
34 
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/device.h>
38 #include <sys/intr.h>
39 #include <sys/kernel.h>
40 #include <sys/percpu.h>
41 #include <sys/proc.h>
42 #include <sys/systm.h>
43 #include <sys/timetc.h>
44 #include <sys/cpu.h>
45 
46 #include <prop/proplib.h>
47 
48 #include <arm/locore.h>
49 #include <arm/cpufunc.h>
50 
51 #include <arm/cortex/gtmr_var.h>
52 #include <arm/cortex/mpcore_var.h>
53 
54 static int gtmr_match(device_t, cfdata_t, void *);
55 static void gtmr_attach(device_t, device_t, void *);
56 
57 static u_int gtmr_get_timecount(struct timecounter *);
58 
59 static uint64_t gtmr_read_cntct(struct gtmr_softc *);
60 static uint32_t gtmr_read_ctl(struct gtmr_softc *);
61 static void gtmr_write_ctl(struct gtmr_softc *, uint32_t);
62 static void gtmr_write_tval(struct gtmr_softc *, uint32_t);
63 static void gtmr_write_cval(struct gtmr_softc *, uint64_t);
64 
65 static struct gtmr_softc gtmr_sc;
66 
/*
 * Per-CPU state for the timer, allocated in gtmr_attach() under
 * DIAGNOSTIC and used by gtmr_intr() for sanity checks.
 */
struct gtmr_percpu {
	uint32_t pc_delta;	/* jitter adjustment applied at last interrupt */
};
70 
/*
 * timecounter(9) glue.  The name, frequency, and private pointer are
 * filled in by gtmr_attach() before tc_init() is called.
 */
static struct timecounter gtmr_timecounter = {
	.tc_get_timecount = gtmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,			/* full 32-bit counter */
	.tc_frequency = 0,			/* set by cpu_initclocks() */
	.tc_name = NULL,			/* set by attach */
	.tc_quality = 500,
	.tc_priv = &gtmr_sc,
	.tc_next = NULL,
};
81 
/* autoconf(9) glue; softc size 0 because we use the static gtmr_sc. */
CFATTACH_DECL_NEW(armgtmr, 0, gtmr_match, gtmr_attach, NULL, NULL);
83 
84 /* ARGSUSED */
85 static int
86 gtmr_match(device_t parent, cfdata_t cf, void *aux)
87 {
88 	struct mpcore_attach_args * const mpcaa = aux;
89 
90 	if (gtmr_sc.sc_dev != NULL)
91 		return 0;
92 
93 	/* Generic Timer is always implemented in ARMv8-A */
94 	if (!cpu_gtmr_exists_p())
95 		return 0;
96 
97 	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
98 		return 0;
99 
100 	return 1;
101 }
102 
/*
 * Attach the generic timer: determine the counter frequency, register
 * the (optional) interrupt handler, and plug into timecounter(9).
 */
static void
gtmr_attach(device_t parent, device_t self, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;
	struct gtmr_softc *sc = &gtmr_sc;
	prop_dictionary_t dict = device_properties(self);
	prop_dictionary_t pdict = device_properties(device_parent(self));
	char freqbuf[sizeof("X.XXX SHz")];
	bool flag;

	/*
	 * This runs at a fixed frequency of 1 to 50MHz.
	 */
	/* Prefer a board-supplied "frequency" property, else read CNTFRQ. */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq))
		sc->sc_freq = gtmr_cntfrq_read();

	/* "physical" may come from this device or its parent. */
	if (!prop_dictionary_get_bool(dict, "physical", &sc->sc_physical))
	    prop_dictionary_get_bool(pdict, "physical", &sc->sc_physical);

	KASSERT(sc->sc_freq != 0);

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	aprint_normal(": Generic Timer (%s, %s)\n", freqbuf,
	    sc->sc_physical ? "physical" : "virtual");

	/* Opt-in quirk; see gtmr_read_cntct() for the workaround itself. */
	if (prop_dictionary_get_bool(dict, "sun50i-a64-unstable-timer", &flag) && flag) {
		sc->sc_flags |= GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER;
		aprint_debug_dev(self, "enabling Allwinner A64 timer workaround\n");
	}

	/* Marking sc_dev also makes gtmr_match() refuse further instances. */
	self->dv_private = sc;
	sc->sc_dev = self;

#ifdef DIAGNOSTIC
	sc->sc_percpu = percpu_alloc(sizeof(struct gtmr_percpu));
#endif

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	/* An irq of -1 means the parent delivers the interrupt elsewhere. */
	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_LEVEL | IST_MPSAFE, gtmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt", __func__);
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}

	/* Warn when the hardware register disagrees with the property. */
	const uint32_t cnt_frq = gtmr_cntfrq_read();
	if (cnt_frq == 0) {
		aprint_verbose_dev(self, "cp15 CNT_FRQ not set\n");
	} else if (cnt_frq != sc->sc_freq) {
		aprint_verbose_dev(self,
		    "cp15 CNT_FRQ (%u) differs from supplied frequency\n",
		    cnt_frq);
	}

	gtmr_timecounter.tc_name = device_xname(sc->sc_dev);
	gtmr_timecounter.tc_frequency = sc->sc_freq;
	gtmr_timecounter.tc_priv = sc;

	tc_init(&gtmr_timecounter);

	/* Disable the timer until we are ready */
	gtmr_write_ctl(sc, 0);
}
172 
173 static uint64_t
174 gtmr_read_cntct(struct gtmr_softc *sc)
175 {
176 	isb();
177 
178 	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
179 		/*
180 		 * The Allwinner A64 SoC has an unstable architectural timer.
181 		 * To workaround this problem, ignore reads where the lower
182 		 * 10 bits are all 0s or 1s.
183 		 */
184 		uint64_t val;
185 		u_int bits;
186 		do {
187 			val = sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
188 			bits = val & __BITS(9,0);
189 		} while (bits == 0 || bits == __BITS(9,0));
190 		return val;
191 	}
192 
193 	return sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
194 }
195 
196 static uint32_t
197 gtmr_read_ctl(struct gtmr_softc *sc)
198 {
199 	isb();
200 
201 	if (sc->sc_physical)
202 		return gtmr_cntp_ctl_read();
203 	else
204 		return gtmr_cntv_ctl_read();
205 }
206 
207 static void
208 gtmr_write_ctl(struct gtmr_softc *sc, uint32_t val)
209 {
210 	if (sc->sc_physical)
211 		gtmr_cntp_ctl_write(val);
212 	else
213 		gtmr_cntv_ctl_write(val);
214 
215 	isb();
216 }
217 
218 static void
219 gtmr_write_tval(struct gtmr_softc *sc, uint32_t val)
220 {
221 	if (sc->sc_physical)
222 		gtmr_cntp_tval_write(val);
223 	else
224 		gtmr_cntv_tval_write(val);
225 
226 	isb();
227 }
228 
229 static void
230 gtmr_write_cval(struct gtmr_softc *sc, uint64_t val)
231 {
232 	if (sc->sc_physical)
233 		gtmr_cntp_cval_write(val);
234 	else
235 		gtmr_cntv_cval_write(val);
236 
237 	isb();
238 }
239 
240 
/*
 * Per-CPU timer bring-up: grant usermode counter access, enable and
 * unmask the timer, and arm the first tick.  Called on the boot CPU
 * from gtmr_cpu_initclocks() and on secondary CPUs from cpu_hatch.
 */
void
gtmr_init_cpu_clock(struct cpu_info *ci)
{
	struct gtmr_softc * const sc = &gtmr_sc;
	uint32_t cntk;
	uint64_t ctl;

	KASSERT(ci == curcpu());

	/* XXX hmm... called from cpu_hatch which hasn't lowered ipl yet */
	int s = splsched();

	/*
	 * Allow the virtual and physical counters to be accessed from
	 * usermode. (PL0)
	 */
	/* Only the counter we actually use is exposed; timers stay PL1. */
	cntk = gtmr_cntk_ctl_read();
	cntk &= ~(CNTKCTL_PL0PTEN | CNTKCTL_PL0VTEN | CNTKCTL_EVNTEN);
	if (sc->sc_physical) {
		cntk |= CNTKCTL_PL0PCTEN;
		cntk &= ~CNTKCTL_PL0VCTEN;
	} else {
		cntk |= CNTKCTL_PL0VCTEN;
		cntk &= ~CNTKCTL_PL0PCTEN;
	}
	gtmr_cntk_ctl_write(cntk);
	isb();

	/*
	 * enable timer and stop masking the timer.
	 */
	ctl = gtmr_read_ctl(sc);
	ctl &= ~CNTCTL_IMASK;
	ctl |= CNTCTL_ENABLE;
	gtmr_write_ctl(sc, ctl);

	/*
	 * Get now and update the compare timer.
	 */
	/* sc_autoinc was set by gtmr_cpu_initclocks() before this runs. */
	ci->ci_lastintr = gtmr_read_cntct(sc);
	gtmr_write_tval(sc, sc->sc_autoinc);

	splx(s);

	KASSERT(gtmr_read_cntct(sc) != 0);
}
287 
288 void
289 gtmr_cpu_initclocks(void)
290 {
291 	struct gtmr_softc * const sc = &gtmr_sc;
292 
293 	KASSERT(sc->sc_dev != NULL);
294 	KASSERT(sc->sc_freq != 0);
295 
296 	sc->sc_autoinc = sc->sc_freq / hz;
297 
298 	gtmr_init_cpu_clock(curcpu());
299 }
300 
301 void
302 gtmr_delay(unsigned int n)
303 {
304 	struct gtmr_softc * const sc = &gtmr_sc;
305 
306 	KASSERT(sc != NULL);
307 
308 	uint32_t freq = sc->sc_freq ? sc->sc_freq : gtmr_cntfrq_read();
309 	KASSERT(freq != 0);
310 
311 	const unsigned int incr_per_us = howmany(freq, 1000000);
312 	int64_t ticks = (int64_t)n * incr_per_us;
313 
314 	uint64_t last = gtmr_read_cntct(sc);
315 
316 	while (ticks > 0) {
317 		uint64_t curr = gtmr_read_cntct(sc);
318 		if (curr >= last)
319 			ticks -= (curr - last);
320 		else
321 			ticks -= (UINT64_MAX - curr + last);
322 		last = curr;
323 	}
324 }
325 
/*
 * gtmr_intr:
 *
 *	Handle the hardclock interrupt: validate the interrupt is ours,
 *	account for scheduling jitter when re-arming the timer, and call
 *	hardclock(9).  Returns 1 if handled, 0 if spurious.
 */
int
gtmr_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe * const cf = arg;
	struct gtmr_softc * const sc = &gtmr_sc;

	/* Both ENABLE and ISTATUS must be set for a genuine expiry. */
	const uint32_t ctl = gtmr_read_ctl(sc);
	if ((ctl & (CNTCTL_ENABLE|CNTCTL_ISTATUS)) != (CNTCTL_ENABLE|CNTCTL_ISTATUS)) {
		aprint_debug_dev(ci->ci_dev, "spurious timer interrupt (ctl=%#x)\n", ctl);
		return 0;
	}

	/* Counter ticks elapsed since the previous interrupt on this CPU. */
	const uint64_t now = gtmr_read_cntct(sc);
	uint64_t delta = now - ci->ci_lastintr;

#ifdef DIAGNOSTIC
	/* Sanity: compare value must be in the past and consistent with
	 * the adjustment recorded on the previous interrupt.  Skipped on
	 * the unstable A64 timer where reads may be discarded/retried. */
	struct gtmr_percpu *pc = NULL;
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		const uint64_t then = sc->sc_physical ? gtmr_cntp_cval_read() : gtmr_cntv_cval_read();
		pc = percpu_getref(sc->sc_percpu);
		KASSERTMSG(then <= now, "%"PRId64, now - then);
		KASSERTMSG(then + pc->pc_delta >= ci->ci_lastintr + sc->sc_autoinc,
		    "%"PRId64, then + pc->pc_delta - ci->ci_lastintr - sc->sc_autoinc);
	}
#endif

	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERTMSG(delta > sc->sc_autoinc / 100,
		    "%s: interrupting too quickly (delta=%"PRIu64") autoinc=%lu",
		    ci->ci_data.cpu_name, delta, sc->sc_autoinc);
	}

	/*
	 * If we got interrupted too soon (delta < sc->sc_autoinc)
	 * or we missed (or almost missed) a tick
	 * (delta >= 7 * sc->sc_autoinc / 4), don't try to adjust for jitter.
	 */
	if (delta >= sc->sc_autoinc && delta <= 7 * sc->sc_autoinc / 4) {
		delta -= sc->sc_autoinc;
	} else {
		delta = 0;
	}

	/* Re-arm: shorten the next interval by the jitter we just saw. */
	isb();
	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		/* Absolute compare value; avoids an extra counter read. */
		gtmr_write_cval(sc, now + sc->sc_autoinc - delta);
	} else {
		gtmr_write_tval(sc, sc->sc_autoinc - delta);
	}

	ci->ci_lastintr = now;

#ifdef DIAGNOSTIC
	/* Record this interrupt's adjustment for the next one's check. */
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERT(delta == (uint32_t) delta);
		pc->pc_delta = delta;
		percpu_putref(sc->sc_percpu);
	}
#endif

	hardclock(cf);

	/* delta >= autoinc here means whole ticks were missed entirely. */
	sc->sc_ev_missing_ticks.ev_count += delta / sc->sc_autoinc;

	return 1;
}
398 
/*
 * MD hook called when the statistics clock rate changes.  Nothing to
 * do here: this driver provides no separate statistics clock.
 */
void
setstatclockrate(int newhz)
{
}
403 
404 static u_int
405 gtmr_get_timecount(struct timecounter *tc)
406 {
407 	struct gtmr_softc * const sc = tc->tc_priv;
408 
409 	return (u_int) gtmr_read_cntct(sc);
410 }
411