/*	$NetBSD: a9tmr.c,v 1.22 2022/03/03 06:26:28 riastradh Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: a9tmr.c,v 1.22 2022/03/03 06:26:28 riastradh Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/xcall.h>

#include <prop/proplib.h>

#include <arm/cortex/a9tmr_reg.h>
#include <arm/cortex/a9tmr_var.h>

#include <arm/cortex/mpcore_var.h>

static int a9tmr_match(device_t, cfdata_t, void *);
static void a9tmr_attach(device_t, device_t, void *);

static u_int a9tmr_get_timecount(struct timecounter *);

static struct a9tmr_softc a9tmr_sc;

static struct timecounter a9tmr_timecounter = {
	.tc_get_timecount = a9tmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,		/* set by cpu_initclocks() */
	.tc_name = NULL,		/* set by attach */
	.tc_quality = 500,
	.tc_priv = &a9tmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(arma9tmr, 0, a9tmr_match, a9tmr_attach, NULL, NULL);

static inline uint32_t
a9tmr_global_read(struct a9tmr_softc *sc, bus_size_t o)
{
	return bus_space_read_4(sc->sc_memt, sc->sc_global_memh, o);
}

static inline void
a9tmr_global_write(struct a9tmr_softc *sc, bus_size_t o, uint32_t v)
{
	bus_space_write_4(sc->sc_memt, sc->sc_global_memh, o, v);
}


/* ARGSUSED */
static int
a9tmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (a9tmr_sc.sc_dev != NULL)
		return 0;

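	/* Defer to the ARMv7 generic timer if the CPU implements one. */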
	if ((armreg_pfr1_read() & ARM_PFR1_GTIMER_MASK) != 0)
		return 0;

	if (!CPU_ID_CORTEX_A9_P(curcpu()->ci_arm_cpuid) &&
	    !CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid))
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	/*
	 * This isn't present on UP A9s (since CBAR isn't present).
	 */
	uint32_t mpidr = armreg_mpidr_read();
	if (mpidr == 0 || (mpidr & MPIDR_U))
		return 0;

	return 1;
}

static void
a9tmr_attach(device_t parent, device_t self, void *aux)
{
	struct a9tmr_softc *sc = &a9tmr_sc;
	struct mpcore_attach_args * const mpcaa = aux;
	prop_dictionary_t dict = device_properties(self);
	char freqbuf[sizeof("XXX SHz")];
	const char *cpu_type;

	/*
	 * This runs at the ARM PERIPHCLOCK.
	 * The MD code should have set up our frequency for us.
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq)) {
		dict = device_properties(parent);
		prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq);
	}

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	if (CPU_ID_CORTEX_A5_P(curcpu()->ci_arm_cpuid)) {
		cpu_type = "A5";
	} else {
		cpu_type = "A9";
	}
	aprint_normal(": %s Global 64-bit Timer (%s)\n", cpu_type, freqbuf);

	device_set_private(self, sc);
	sc->sc_dev = self;
	sc->sc_memt = mpcaa->mpcaa_memt;
	sc->sc_memh = mpcaa->mpcaa_memh;

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

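	/* Map the global timer registers as a subregion of the parent's mapping. */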
	bus_space_subregion(sc->sc_memt, sc->sc_memh,
	    mpcaa->mpcaa_off1, TMR_GLOBAL_SIZE, &sc->sc_global_memh);

	/* Enable the timer early for delay(), disable all other features */
	a9tmr_global_write(sc, TMR_GBL_CTL, TMR_CTL_ENABLE);

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_EDGE | IST_MPSAFE, a9tmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt", __func__);
		aprint_normal_dev(sc->sc_dev, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}
}

static inline uint64_t
a9tmr_gettime(struct a9tmr_softc *sc)
{
	uint32_t lo, hi;

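	/*
	 * Read the upper word, then the lower word, and retry until the
	 * upper word is stable so the combined 64-bit value is consistent.
	 */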
	do {
		hi = a9tmr_global_read(sc, TMR_GBL_CTR_U);
		lo = a9tmr_global_read(sc, TMR_GBL_CTR_L);
	} while (hi != a9tmr_global_read(sc, TMR_GBL_CTR_U));

	return ((uint64_t)hi << 32) | lo;
}

void
a9tmr_init_cpu_clock(struct cpu_info *ci)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;
	uint64_t now = a9tmr_gettime(sc);

	KASSERT(ci == curcpu());

	ci->ci_lastintr = now;

	a9tmr_global_write(sc, TMR_GBL_AUTOINC, sc->sc_autoinc);

	/*
	 * To update the compare register we have to disable comparisons first.
	 */
	uint32_t ctl = a9tmr_global_read(sc, TMR_GBL_CTL);
	if (ctl & TMR_GBL_CTL_CMP_ENABLE) {
		a9tmr_global_write(sc, TMR_GBL_CTL,
		    ctl & ~TMR_GBL_CTL_CMP_ENABLE);
	}

	/*
	 * Schedule the next interrupt.
	 */
	now += sc->sc_autoinc;
	a9tmr_global_write(sc, TMR_GBL_CMP_L, (uint32_t) now);
	a9tmr_global_write(sc, TMR_GBL_CMP_H, (uint32_t) (now >> 32));

	/*
	 * Re-enable the comparator and now enable interrupts.
	 */
	a9tmr_global_write(sc, TMR_GBL_INT, 1);	/* clear interrupt pending */
	ctl |= TMR_GBL_CTL_CMP_ENABLE | TMR_GBL_CTL_INT_ENABLE |
	    TMR_GBL_CTL_AUTO_INC | TMR_CTL_ENABLE;
	a9tmr_global_write(sc, TMR_GBL_CTL, ctl);
#if 0
	printf("%s: %s: ctl %#x autoinc %u cmp %#x%08x now %#"PRIx64"\n",
	    __func__, ci->ci_data.cpu_name,
	    a9tmr_global_read(sc, TMR_GBL_CTL),
	    a9tmr_global_read(sc, TMR_GBL_AUTOINC),
	    a9tmr_global_read(sc, TMR_GBL_CMP_H),
	    a9tmr_global_read(sc, TMR_GBL_CMP_L),
	    a9tmr_gettime(sc));

	int s = splsched();
	uint64_t when = now;
	u_int n = 0;
	while ((now = a9tmr_gettime(sc)) < when) {
		/* spin */
		n++;
		KASSERTMSG(n <= sc->sc_autoinc,
		    "spun %u times but only %"PRIu64" has passed",
		    n, when - now);
	}
	printf("%s: %s: status %#x cmp %#x%08x now %#"PRIx64"\n",
	    __func__, ci->ci_data.cpu_name,
	    a9tmr_global_read(sc, TMR_GBL_INT),
	    a9tmr_global_read(sc, TMR_GBL_CMP_H),
	    a9tmr_global_read(sc, TMR_GBL_CMP_L),
	    a9tmr_gettime(sc));
	splx(s);
#elif 0
	delay(1000000 / hz + 1000);
#endif
}

void
a9tmr_cpu_initclocks(void)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

	sc->sc_autoinc = sc->sc_freq / hz;

	a9tmr_init_cpu_clock(curcpu());

	a9tmr_timecounter.tc_name = device_xname(sc->sc_dev);
	a9tmr_timecounter.tc_frequency = sc->sc_freq;

	tc_init(&a9tmr_timecounter);
}

static void
a9tmr_update_freq_cb(void *arg1, void *arg2)
{
	a9tmr_init_cpu_clock(curcpu());
}

void
a9tmr_update_freq(uint32_t freq)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;
	uint64_t xc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(freq != 0);

	tc_detach(&a9tmr_timecounter);

	sc->sc_freq = freq;
	sc->sc_autoinc = sc->sc_freq / hz;

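	/* Reprogram the timer on every CPU with the new autoinc interval. */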
	xc = xc_broadcast(0, a9tmr_update_freq_cb, NULL, NULL);
	xc_wait(xc);

	a9tmr_timecounter.tc_frequency = sc->sc_freq;
	tc_init(&a9tmr_timecounter);
}

void
a9tmr_delay(unsigned int n)
{
	struct a9tmr_softc * const sc = &a9tmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq :
	    curcpu()->ci_data.cpu_cc_freq / 2;
	KASSERT(freq != 0);

	/*
	 * Not quite a divide by 1000000, but close enough
	 * (higher by 1.3%, which means we wait 1.3% longer).
	 */
	const uint64_t incr_per_us = (freq >> 20) + (freq >> 24);

	const uint64_t delta = n * incr_per_us;
	const uint64_t base = a9tmr_gettime(sc);
	const uint64_t finish = base + delta;

	while (a9tmr_gettime(sc) < finish) {
		/* spin */
	}
}

/*
 * a9tmr_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
a9tmr_intr(void *arg)
{
	struct clockframe * const cf = arg;
	struct a9tmr_softc * const sc = &a9tmr_sc;
	struct cpu_info * const ci = curcpu();

	const uint64_t now = a9tmr_gettime(sc);
	uint64_t delta = now - ci->ci_lastintr;

	a9tmr_global_write(sc, TMR_GBL_INT, 1);	/* Ack the interrupt */

#if 0
	printf("%s(%p): %s: now %#"PRIx64" delta %"PRIu64"\n",
	    __func__, cf, ci->ci_data.cpu_name, now, delta);
#endif
	KASSERTMSG(delta > sc->sc_autoinc / 64,
	    "%s: interrupting too quickly (delta=%"PRIu64")",
	    ci->ci_data.cpu_name, delta);

	ci->ci_lastintr = now;

	hardclock(cf);

	if (delta > sc->sc_autoinc) {
		u_int ticks = hz;
		for (delta -= sc->sc_autoinc;
		     delta >= sc->sc_autoinc && ticks > 0;
		     delta -= sc->sc_autoinc, ticks--) {
#if 0
			/*
			 * Try to make up for up to a second's worth of
			 * missed clock interrupts.
			 */
			hardclock(cf);
#else
			sc->sc_ev_missing_ticks.ev_count++;
#endif
		}
	}

	return 1;
}

/* XXX This conflicts with gtmr, hence the temporary weak alias kludge */
#if 1
void a9tmr_setstatclockrate(int);
void
a9tmr_setstatclockrate(int newhz)
{
}
__weak_alias(setstatclockrate, a9tmr_setstatclockrate);
#endif

static u_int
a9tmr_get_timecount(struct timecounter *tc)
{
	struct a9tmr_softc * const sc = tc->tc_priv;

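	/* The timecounter uses only the low 32 bits of the 64-bit counter. */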
	return (u_int) (a9tmr_gettime(sc));
}