/* $NetBSD: gtmr.c,v 1.28 2018/05/21 10:28:13 jmcneill Exp $ */

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtmr.c,v 1.28 2018/05/21 10:28:13 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/percpu.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>

#include <prop/proplib.h>

#include <arm/locore.h>
#include <arm/cpufunc.h>

#include <arm/cortex/gtmr_var.h>
#include <arm/cortex/mpcore_var.h>

#define stable_write(reg) \
static struct evcnt reg ## _write_ev; \
static void \
reg ## _stable_write(struct gtmr_softc *sc, uint64_t val) \
{ \
	int retry; \
	reg ## _write(val); \
	retry = 0; \
	while (reg ## _read() != (val) && retry++ < 200) \
		reg ## _write(val); \
	if (retry > reg ## _write_ev.ev_count) { \
		reg ## _write_ev.ev_count = retry; \
	} \
}

stable_write(gtmr_cntv_tval);

#define stable_read(reg) \
static struct evcnt reg ## _read_ev; \
static uint64_t \
reg ## _stable_read(struct gtmr_softc *sc) \
{ \
	uint64_t oval, val; \
	int retry = 0; \
	val = reg ## _read(); \
	while (++retry < 200) { \
		oval = val; \
		val = reg ## _read(); \
		if (val == oval) \
			break; \
	} \
	if (retry > reg ## _read_ev.ev_count) { \
		reg ## _read_ev.ev_count = retry; \
	} \
	return val; \
}

stable_read(gtmr_cntv_cval);
stable_read(gtmr_cntvct);
stable_read(gtmr_cntpct);
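
/*
 * The macros above generate one accessor and one event counter per
 * register: gtmr_cntv_tval_stable_write(), gtmr_cntv_cval_stable_read(),
 * gtmr_cntvct_stable_read() and gtmr_cntpct_stable_read().  A stable read
 * re-reads the register until two consecutive values agree, and a stable
 * write re-issues the write until the value reads back, each capped at
 * 200 attempts; the worst retry count observed is recorded in the
 * matching *_read_ev / *_write_ev evcnt (attached in gtmr_attach()),
 * presumably as a workaround for timer implementations whose registers
 * do not return a consistent value on a single access.
 */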

static int gtmr_match(device_t, cfdata_t, void *);
static void gtmr_attach(device_t, device_t, void *);

static u_int gtmr_get_timecount(struct timecounter *);

static struct gtmr_softc gtmr_sc;

struct gtmr_percpu {
	uint32_t pc_delta;
};

static struct timecounter gtmr_timecounter = {
	.tc_get_timecount = gtmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,		/* set by cpu_initclocks() */
	.tc_name = NULL,		/* set by attach */
	.tc_quality = 500,
	.tc_priv = &gtmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(armgtmr, 0, gtmr_match, gtmr_attach, NULL, NULL);

/* ARGSUSED */
static int
gtmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (gtmr_sc.sc_dev != NULL)
		return 0;

	/* Generic Timer is always implemented in ARMv8-A */
	if (!cpu_gtmr_exists_p())
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	return 1;
}

static void
gtmr_attach(device_t parent, device_t self, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;
	struct gtmr_softc *sc = &gtmr_sc;
	prop_dictionary_t dict = device_properties(self);
	char freqbuf[sizeof("X.XXX SHz")];

	/*
	 * This runs at a fixed frequency of 1 to 50 MHz.
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq))
		sc->sc_freq = gtmr_cntfrq_read();

	KASSERT(sc->sc_freq != 0);

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	aprint_normal(": ARMv7 Generic 64-bit Timer (%s)\n", freqbuf);

	/*
	 * Enable the virtual counter to be accessed from usermode.
	 */
	gtmr_cntk_ctl_write(gtmr_cntk_ctl_read() |
	    CNTKCTL_PL0VCTEN | CNTKCTL_PL0PCTEN);

	self->dv_private = sc;
	sc->sc_dev = self;

#ifdef DIAGNOSTIC
	sc->sc_percpu = percpu_alloc(sizeof(struct gtmr_percpu));
#endif

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	evcnt_attach_dynamic(&gtmr_cntv_tval_write_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTV_TVAL write retry max");
	evcnt_attach_dynamic(&gtmr_cntv_cval_read_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTV_CVAL read retry max");
	evcnt_attach_dynamic(&gtmr_cntvct_read_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTVCT read retry max");
	evcnt_attach_dynamic(&gtmr_cntpct_read_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTPCT read retry max");

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_LEVEL | IST_MPSAFE, gtmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt",
			    __func__);
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}

	const uint32_t cnt_frq = gtmr_cntfrq_read();
	if (cnt_frq == 0) {
		aprint_verbose_dev(self, "cp15 CNT_FRQ not set\n");
	} else if (cnt_frq != sc->sc_freq) {
		aprint_verbose_dev(self,
		    "cp15 CNT_FRQ (%u) differs from supplied frequency\n",
		    cnt_frq);
	}

	gtmr_timecounter.tc_name = device_xname(sc->sc_dev);
	gtmr_timecounter.tc_frequency = sc->sc_freq;
	gtmr_timecounter.tc_priv = sc;

	tc_init(&gtmr_timecounter);

	/* Disable the timer until we are ready */
	gtmr_cntv_ctl_write(0);
	gtmr_cntp_ctl_write(0);
}

void
gtmr_init_cpu_clock(struct cpu_info *ci)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(ci == curcpu());

	int s = splsched();

	/*
	 * enable timer and stop masking the timer.
	 */
	gtmr_cntv_ctl_write(CNTCTL_ENABLE);
	gtmr_cntp_ctl_write(CNTCTL_ENABLE);

	/*
	 * Get now and update the compare timer.
	 */
	arm_isb();
	ci->ci_lastintr = gtmr_cntvct_stable_read(sc);
	gtmr_cntv_tval_stable_write(sc, sc->sc_autoinc);
	splx(s);
	KASSERT(gtmr_cntvct_read() != 0);
}

void
gtmr_cpu_initclocks(void)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

	sc->sc_autoinc = sc->sc_freq / hz;

	gtmr_init_cpu_clock(curcpu());
}
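
/*
 * gtmr_delay:
 *
 *	Busy-wait for at least n microseconds by polling CNTPCT.
 *	For illustration only (the counter frequency is board-dependent;
 *	24 MHz is assumed here): incr_per_us = howmany(24000000, 1000000)
 *	= 24, so gtmr_delay(10) spins until roughly 240 counter ticks have
 *	elapsed.  Because howmany() rounds up, a non-integral MHz frequency
 *	makes the delay err slightly long, never short.
 */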
void
gtmr_delay(unsigned int n)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq : gtmr_cntfrq_read();
	KASSERT(freq != 0);

	const unsigned int incr_per_us = howmany(freq, 1000000);
	unsigned int delta = 0, usecs = 0;

	arm_isb();
	uint64_t last = gtmr_cntpct_stable_read(sc);

	while (n > usecs) {
		arm_isb();
		uint64_t curr = gtmr_cntpct_stable_read(sc);
		if (curr < last)
			delta += curr + (UINT64_MAX - last);
		else
			delta += curr - last;

		last = curr;
		if (delta >= incr_per_us) {
			usecs += delta / incr_per_us;
			delta %= incr_per_us;
		}
	}
}

/*
 * gtmr_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
gtmr_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe * const cf = arg;
	struct gtmr_softc * const sc = &gtmr_sc;

	arm_isb();

	const uint32_t ctl = gtmr_cntv_ctl_read();
	if ((ctl & CNTCTL_ISTATUS) == 0)
		return 0;

	const uint64_t now = gtmr_cntvct_stable_read(sc);
	uint64_t delta = now - ci->ci_lastintr;

#ifdef DIAGNOSTIC
	const uint64_t then = gtmr_cntv_cval_stable_read(sc);
	struct gtmr_percpu * const pc = percpu_getref(sc->sc_percpu);
	KASSERTMSG(then <= now, "%"PRId64, now - then);
	KASSERTMSG(then + pc->pc_delta >= ci->ci_lastintr + sc->sc_autoinc,
	    "%"PRId64, then + pc->pc_delta - ci->ci_lastintr - sc->sc_autoinc);
#endif

	KASSERTMSG(delta > sc->sc_autoinc / 100,
	    "%s: interrupting too quickly (delta=%"PRIu64") autoinc=%lu",
	    ci->ci_data.cpu_name, delta, sc->sc_autoinc);

	/*
	 * If we got interrupted too soon (delta < sc->sc_autoinc)
	 * or we missed (or almost missed) a tick
	 * (delta >= 7 * sc->sc_autoinc / 4), don't try to adjust for jitter.
	 */
	if (delta >= sc->sc_autoinc && delta <= 7 * sc->sc_autoinc / 4) {
		delta -= sc->sc_autoinc;
	} else {
		delta = 0;
	}
	gtmr_cntv_tval_stable_write(sc, sc->sc_autoinc - delta);

	ci->ci_lastintr = now;

#ifdef DIAGNOSTIC
	KASSERT(delta == (uint32_t) delta);
	pc->pc_delta = delta;
	percpu_putref(sc->sc_percpu);
#endif

	hardclock(cf);

	sc->sc_ev_missing_ticks.ev_count += delta / sc->sc_autoinc;

	return 1;
}

void
setstatclockrate(int newhz)
{
}

static u_int
gtmr_get_timecount(struct timecounter *tc)
{
	struct gtmr_softc * const sc = tc->tc_priv;
	arm_isb();	// we want the time NOW, not some instructions later.
	return (u_int) gtmr_cntpct_stable_read(sc);
}
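
/*
 * Note on gtmr_get_timecount() above: gtmr_timecounter.tc_counter_mask is
 * ~0u, so the timecounter(9) layer consumes only the low 32 bits of CNTPCT
 * returned here.  For illustration (assuming a 24 MHz counter, which is
 * board-dependent), those 32 bits wrap about every 2^32 / 24000000, i.e.
 * roughly 179 seconds; the timecounter code copes with that as long as the
 * clock interrupt keeps sampling the counter considerably more often.
 */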