/*	$NetBSD: gtmr.c,v 1.41 2019/08/12 23:31:48 jmcneill Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtmr.c,v 1.41 2019/08/12 23:31:48 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/percpu.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>

#include <prop/proplib.h>

#include <arm/locore.h>
#include <arm/cpufunc.h>

#include <arm/cortex/gtmr_var.h>
#include <arm/cortex/mpcore_var.h>

static int gtmr_match(device_t, cfdata_t, void *);
static void gtmr_attach(device_t, device_t, void *);

static u_int gtmr_get_timecount(struct timecounter *);

static uint64_t gtmr_read_cntct(struct gtmr_softc *);
static uint32_t gtmr_read_ctl(struct gtmr_softc *);
static void gtmr_write_ctl(struct gtmr_softc *, uint32_t);
static void gtmr_write_tval(struct gtmr_softc *, uint32_t);
static void gtmr_write_cval(struct gtmr_softc *, uint64_t);

static struct gtmr_softc gtmr_sc;

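/*
 * Per-CPU bookkeeping: pc_delta remembers the jitter adjustment applied
 * on the previous tick, so the DIAGNOSTIC checks in gtmr_intr() can verify
 * that the compare value was programmed consistently.
 */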
struct gtmr_percpu {
	uint32_t pc_delta;
};

static struct timecounter gtmr_timecounter = {
	.tc_get_timecount = gtmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,			/* set by cpu_initclocks() */
	.tc_name = NULL,			/* set by attach */
	.tc_quality = 500,
	.tc_priv = &gtmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(armgtmr, 0, gtmr_match, gtmr_attach, NULL, NULL);

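/*
 * gtmr_match:
 *
 *	Autoconfiguration match.  Succeed at most once, and only when the
 *	CPU actually implements the Generic Timer.
 */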
/* ARGSUSED */
static int
gtmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (gtmr_sc.sc_dev != NULL)
		return 0;

	/* Generic Timer is always implemented in ARMv8-A */
	if (!cpu_gtmr_exists_p())
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	return 1;
}

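/*
 * gtmr_attach:
 *
 *	Determine the counter frequency (from device properties or CNTFRQ),
 *	select the physical or virtual timer, hook up the timer interrupt,
 *	and register the counter as a timecounter(9).
 */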
static void
gtmr_attach(device_t parent, device_t self, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;
	struct gtmr_softc *sc = &gtmr_sc;
	prop_dictionary_t dict = device_properties(self);
	prop_dictionary_t pdict = device_properties(device_parent(self));
	char freqbuf[sizeof("X.XXX SHz")];
	bool flag;

	/*
	 * This runs at a fixed frequency of 1 to 50MHz.
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq))
		sc->sc_freq = gtmr_cntfrq_read();

	if (!prop_dictionary_get_bool(dict, "physical", &sc->sc_physical))
		prop_dictionary_get_bool(pdict, "physical", &sc->sc_physical);

	KASSERT(sc->sc_freq != 0);

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	aprint_normal(": Generic Timer (%s, %s)\n", freqbuf,
	    sc->sc_physical ? "physical" : "virtual");

	if (prop_dictionary_get_bool(dict, "sun50i-a64-unstable-timer", &flag) && flag) {
		sc->sc_flags |= GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER;
		aprint_debug_dev(self, "enabling Allwinner A64 timer workaround\n");
	}

	self->dv_private = sc;
	sc->sc_dev = self;

#ifdef DIAGNOSTIC
	sc->sc_percpu = percpu_alloc(sizeof(struct gtmr_percpu));
#endif

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_LEVEL | IST_MPSAFE, gtmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt", __func__);
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}

	const uint32_t cnt_frq = gtmr_cntfrq_read();
	if (cnt_frq == 0) {
		aprint_verbose_dev(self, "cp15 CNT_FRQ not set\n");
	} else if (cnt_frq != sc->sc_freq) {
		aprint_verbose_dev(self,
		    "cp15 CNT_FRQ (%u) differs from supplied frequency\n",
		    cnt_frq);
	}

	gtmr_timecounter.tc_name = device_xname(sc->sc_dev);
	gtmr_timecounter.tc_frequency = sc->sc_freq;
	gtmr_timecounter.tc_priv = sc;

	tc_init(&gtmr_timecounter);

	/* Disable the timer until we are ready */
	gtmr_write_ctl(sc, 0);
}

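/*
 * gtmr_read_cntct:
 *
 *	Read the current count from CNTPCT or CNTVCT (depending on whether
 *	the physical or virtual timer is in use), issuing an ISB first so
 *	the read is not reordered before earlier instructions.
 */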
static uint64_t
gtmr_read_cntct(struct gtmr_softc *sc)
{
	arm_isb();

	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		/*
		 * The Allwinner A64 SoC has an unstable architectural timer.
		 * To work around this problem, ignore reads where the lower
		 * 10 bits are all 0s or 1s.
		 */
		uint64_t val;
		u_int bits;
		do {
			val = sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
			bits = val & __BITS(9,0);
		} while (bits == 0 || bits == __BITS(9,0));
		return val;
	}

	return sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
}

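/*
 * Accessors for the timer control, timer value, and compare value
 * registers.  Each dispatches to the physical (CNTP_*) or virtual
 * (CNTV_*) register set depending on sc_physical, and every write is
 * followed by an ISB so it takes effect before subsequent timer
 * operations.
 */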
static uint32_t
gtmr_read_ctl(struct gtmr_softc *sc)
{
	if (sc->sc_physical)
		return gtmr_cntp_ctl_read();
	else
		return gtmr_cntv_ctl_read();
}

static void
gtmr_write_ctl(struct gtmr_softc *sc, uint32_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_ctl_write(val);
	else
		gtmr_cntv_ctl_write(val);

	arm_isb();
}

static void
gtmr_write_tval(struct gtmr_softc *sc, uint32_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_tval_write(val);
	else
		gtmr_cntv_tval_write(val);

	arm_isb();
}

static void
gtmr_write_cval(struct gtmr_softc *sc, uint64_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_cval_write(val);
	else
		gtmr_cntv_cval_write(val);

	arm_isb();
}

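/*
 * gtmr_init_cpu_clock:
 *
 *	Per-CPU timer setup: grant PL0 (user) read access to the counter
 *	in use, enable the timer, and arm it for the first tick.
 */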
void
gtmr_init_cpu_clock(struct cpu_info *ci)
{
	struct gtmr_softc * const sc = &gtmr_sc;
	uint32_t val;

	KASSERT(ci == curcpu());

	int s = splsched();

	/*
	 * Allow the counter we are using (physical or virtual) to be
	 * read from usermode (PL0); disable PL0 access to the other
	 * counter, the timers, and the event stream.
	 */
	val = gtmr_cntk_ctl_read();
	val &= ~(CNTKCTL_PL0PTEN | CNTKCTL_PL0VTEN | CNTKCTL_EVNTEN);
	if (sc->sc_physical) {
		val |= CNTKCTL_PL0PCTEN;
		val &= ~CNTKCTL_PL0VCTEN;
	} else {
		val |= CNTKCTL_PL0VCTEN;
		val &= ~CNTKCTL_PL0PCTEN;
	}
	gtmr_cntk_ctl_write(val);
	arm_isb();

	/*
	 * Enable the timer and stop masking the timer interrupt.
	 */
	gtmr_write_ctl(sc, CNTCTL_ENABLE);

	/*
	 * Record the current count and arm the compare timer for the
	 * first tick.
	 */
	ci->ci_lastintr = gtmr_read_cntct(sc);
	gtmr_write_tval(sc, sc->sc_autoinc);
	splx(s);
	KASSERT(gtmr_read_cntct(sc) != 0);
}

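/*
 * gtmr_cpu_initclocks:
 *
 *	Compute the reload value (counter ticks per hardclock tick) and
 *	start the clock on the calling CPU.
 */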
void
gtmr_cpu_initclocks(void)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

	sc->sc_autoinc = sc->sc_freq / hz;

	gtmr_init_cpu_clock(curcpu());
}

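/*
 * gtmr_delay:
 *
 *	Busy-wait for at least n microseconds by polling the counter.
 *	howmany() rounds the ticks-per-microsecond figure up, so the
 *	delay errs on the long side.
 */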
void
gtmr_delay(unsigned int n)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq : gtmr_cntfrq_read();
	KASSERT(freq != 0);

	const unsigned int incr_per_us = howmany(freq, 1000000);
	int64_t ticks = (int64_t)n * incr_per_us;

	uint64_t last = gtmr_read_cntct(sc);

	while (ticks > 0) {
		uint64_t curr = gtmr_read_cntct(sc);
		if (curr >= last)
			ticks -= (curr - last);
		else
			ticks -= (UINT64_MAX - curr + last);
		last = curr;
	}
}

/*
 * gtmr_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
gtmr_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe * const cf = arg;
	struct gtmr_softc * const sc = &gtmr_sc;
	uint32_t ctl;

	ctl = gtmr_read_ctl(sc);
	if ((ctl & CNTCTL_ISTATUS) == 0)
		return 0;

	ctl |= CNTCTL_IMASK;
	gtmr_write_ctl(sc, ctl);

	const uint64_t now = gtmr_read_cntct(sc);
	uint64_t delta = now - ci->ci_lastintr;

#ifdef DIAGNOSTIC
	struct gtmr_percpu *pc = NULL;
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		const uint64_t then = sc->sc_physical ? gtmr_cntp_cval_read() : gtmr_cntv_cval_read();
		pc = percpu_getref(sc->sc_percpu);
		KASSERTMSG(then <= now, "%"PRId64, now - then);
		KASSERTMSG(then + pc->pc_delta >= ci->ci_lastintr + sc->sc_autoinc,
		    "%"PRId64, then + pc->pc_delta - ci->ci_lastintr - sc->sc_autoinc);
	}
#endif

	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERTMSG(delta > sc->sc_autoinc / 100,
		    "%s: interrupting too quickly (delta=%"PRIu64") autoinc=%lu",
		    ci->ci_data.cpu_name, delta, sc->sc_autoinc);
	}

	/*
	 * If we got interrupted too soon (delta < sc->sc_autoinc)
	 * or we missed (or almost missed) a tick
	 * (delta >= 7 * sc->sc_autoinc / 4), don't try to adjust for jitter.
	 */
	if (delta >= sc->sc_autoinc && delta <= 7 * sc->sc_autoinc / 4) {
		delta -= sc->sc_autoinc;
	} else {
		delta = 0;
	}

	arm_isb();
	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		gtmr_write_cval(sc, now + sc->sc_autoinc - delta);
	} else {
		gtmr_write_tval(sc, sc->sc_autoinc - delta);
	}

	ctl = gtmr_read_ctl(sc);
	ctl &= ~CNTCTL_IMASK;
	ctl |= CNTCTL_ENABLE;
	gtmr_write_ctl(sc, ctl);

	ci->ci_lastintr = now;

#ifdef DIAGNOSTIC
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERT(delta == (uint32_t) delta);
		pc->pc_delta = delta;
		percpu_putref(sc->sc_percpu);
	}
#endif

	hardclock(cf);

	sc->sc_ev_missing_ticks.ev_count += delta / sc->sc_autoinc;

	return 1;
}

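/*
 * setstatclockrate:
 *
 *	Nothing to do here; the statistics clock is driven from hardclock.
 */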
void
setstatclockrate(int newhz)
{
}

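/*
 * gtmr_get_timecount:
 *
 *	timecounter(9) read method: return the low 32 bits of the count.
 */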
static u_int
gtmr_get_timecount(struct timecounter *tc)
{
	struct gtmr_softc * const sc = tc->tc_priv;

	return (u_int) gtmr_read_cntct(sc);
}
408