/*	$NetBSD: gtmr.c,v 1.32 2018/06/30 17:30:37 jmcneill Exp $	*/

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtmr.c,v 1.32 2018/06/30 17:30:37 jmcneill Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/percpu.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>

#include <prop/proplib.h>

#include <arm/locore.h>
#include <arm/cpufunc.h>

#include <arm/cortex/gtmr_var.h>
#include <arm/cortex/mpcore_var.h>

/*
 * On some hardware the timer registers do not reliably latch on the first
 * access, so retry writes until the register reads back the value written
 * (and retry reads until two consecutive reads agree), recording the
 * worst-case retry count in an event counter.
 */
#define	stable_write(reg)					\
static struct evcnt reg ## _write_ev;				\
static void							\
reg ## _stable_write(struct gtmr_softc *sc, uint64_t val)	\
{								\
	int retry;						\
	reg ## _write(val);					\
	retry = 0;						\
	while (reg ## _read() != (val) && retry++ < 200)	\
		reg ## _write(val);				\
	if (retry > reg ## _write_ev.ev_count) {		\
		reg ## _write_ev.ev_count = retry;		\
	}							\
}

stable_write(gtmr_cntv_tval);

#define	stable_read(reg)					\
static struct evcnt reg ## _read_ev;				\
static uint64_t							\
reg ## _stable_read(struct gtmr_softc *sc)			\
{								\
	uint64_t oval, val;					\
	int retry = 0;						\
	val = reg ## _read();					\
	while (++retry < 200) {					\
		oval = val;					\
		val = reg ## _read();				\
		if (val == oval)				\
			break;					\
	}							\
	if (retry > reg ## _read_ev.ev_count) {			\
		reg ## _read_ev.ev_count = retry;		\
	}							\
	return val;						\
}

stable_read(gtmr_cntv_cval);
stable_read(gtmr_cntvct);

static int gtmr_match(device_t, cfdata_t, void *);
static void gtmr_attach(device_t, device_t, void *);

static u_int gtmr_get_timecount(struct timecounter *);

static struct gtmr_softc gtmr_sc;

struct gtmr_percpu {
	uint32_t pc_delta;
};

static struct timecounter gtmr_timecounter = {
	.tc_get_timecount = gtmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,		/* set by cpu_initclocks() */
	.tc_name = NULL,		/* set by attach */
	.tc_quality = 500,
	.tc_priv = &gtmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(armgtmr, 0, gtmr_match, gtmr_attach, NULL, NULL);

/* ARGSUSED */
static int
gtmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (gtmr_sc.sc_dev != NULL)
		return 0;

	/* The Generic Timer is always implemented in ARMv8-A. */
	if (!cpu_gtmr_exists_p())
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	return 1;
}

static void
gtmr_attach(device_t parent, device_t self, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;
	struct gtmr_softc *sc = &gtmr_sc;
	prop_dictionary_t dict = device_properties(self);
	char freqbuf[sizeof("X.XXX SHz")];

	/*
	 * This runs at a fixed frequency of 1 to 50 MHz.
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq))
		sc->sc_freq = gtmr_cntfrq_read();

	KASSERT(sc->sc_freq != 0);

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	aprint_normal(": ARM Generic Timer (%s)\n", freqbuf);

	/*
	 * Enable the virtual counter to be accessed from usermode.
	 */
	gtmr_cntk_ctl_write(gtmr_cntk_ctl_read() |
	    CNTKCTL_PL0VCTEN | CNTKCTL_PL0PCTEN);

	self->dv_private = sc;
	sc->sc_dev = self;

#ifdef DIAGNOSTIC
	sc->sc_percpu = percpu_alloc(sizeof(struct gtmr_percpu));
#endif

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	evcnt_attach_dynamic(&gtmr_cntv_tval_write_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTV_TVAL write retry max");
	evcnt_attach_dynamic(&gtmr_cntv_cval_read_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTV_CVAL read retry max");
	evcnt_attach_dynamic(&gtmr_cntvct_read_ev, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "CNTVCT read retry max");

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_LEVEL | IST_MPSAFE, gtmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt", __func__);
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}

	const uint32_t cnt_frq = gtmr_cntfrq_read();
	if (cnt_frq == 0) {
		aprint_verbose_dev(self, "cp15 CNT_FRQ not set\n");
	} else if (cnt_frq != sc->sc_freq) {
		aprint_verbose_dev(self,
		    "cp15 CNT_FRQ (%u) differs from supplied frequency\n",
		    cnt_frq);
	}

	gtmr_timecounter.tc_name = device_xname(sc->sc_dev);
	gtmr_timecounter.tc_frequency = sc->sc_freq;
	gtmr_timecounter.tc_priv = sc;

	tc_init(&gtmr_timecounter);

	/* Disable the timer until we are ready. */
	gtmr_cntv_ctl_write(0);
}

void
gtmr_init_cpu_clock(struct cpu_info *ci)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(ci == curcpu());

	int s = splsched();

	/*
	 * Enable the timer and stop masking its interrupt.
	 */
	gtmr_cntv_ctl_write(CNTCTL_ENABLE);

	/*
	 * Get the current count and arm the compare timer.
	 */
	arm_isb();
	ci->ci_lastintr = gtmr_cntvct_stable_read(sc);
	gtmr_cntv_tval_stable_write(sc, sc->sc_autoinc);
	splx(s);
	KASSERT(gtmr_cntvct_read() != 0);
}

void
gtmr_cpu_initclocks(void)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

	sc->sc_autoinc = sc->sc_freq / hz;

	gtmr_init_cpu_clock(curcpu());
}

void
gtmr_delay(unsigned int n)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq : gtmr_cntfrq_read();
	KASSERT(freq != 0);

	const unsigned int incr_per_us = howmany(freq, 1000000);
	int64_t ticks = (int64_t)n * incr_per_us;

	arm_isb();
	uint64_t last = gtmr_cntvct_stable_read(sc);

	while (ticks > 0) {
		arm_isb();
		uint64_t curr = gtmr_cntvct_stable_read(sc);
		if (curr >= last)
			ticks -= (curr - last);
		else	/* the counter wrapped around */
			ticks -= (UINT64_MAX - last) + curr + 1;
		last = curr;
	}
}

/*
 * gtmr_intr:
 *
 *	Handle the hardclock interrupt.
 */
int
gtmr_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe * const cf = arg;
	struct gtmr_softc * const sc = &gtmr_sc;

	arm_isb();

	const uint32_t ctl = gtmr_cntv_ctl_read();
	if ((ctl & CNTCTL_ISTATUS) == 0)
		return 0;

	const uint64_t now = gtmr_cntvct_stable_read(sc);
	uint64_t delta = now - ci->ci_lastintr;

#ifdef DIAGNOSTIC
	const uint64_t then = gtmr_cntv_cval_stable_read(sc);
	struct gtmr_percpu * const pc = percpu_getref(sc->sc_percpu);
	KASSERTMSG(then <= now, "%"PRId64, now - then);
	KASSERTMSG(then + pc->pc_delta >= ci->ci_lastintr + sc->sc_autoinc,
	    "%"PRId64, then + pc->pc_delta - ci->ci_lastintr - sc->sc_autoinc);
#endif

	KASSERTMSG(delta > sc->sc_autoinc / 100,
	    "%s: interrupting too quickly (delta=%"PRIu64") autoinc=%lu",
	    ci->ci_data.cpu_name, delta, sc->sc_autoinc);

	/*
	 * If we got interrupted too soon (delta < sc->sc_autoinc)
	 * or we missed (or almost missed) a tick
	 * (delta >= 7 * sc->sc_autoinc / 4), don't try to adjust for jitter.
	 */
	if (delta >= sc->sc_autoinc && delta <= 7 * sc->sc_autoinc / 4) {
		delta -= sc->sc_autoinc;
	} else {
		delta = 0;
	}
	gtmr_cntv_tval_stable_write(sc, sc->sc_autoinc - delta);

	ci->ci_lastintr = now;

#ifdef DIAGNOSTIC
	KASSERT(delta == (uint32_t) delta);
	pc->pc_delta = delta;
	percpu_putref(sc->sc_percpu);
#endif

	hardclock(cf);

	sc->sc_ev_missing_ticks.ev_count += delta / sc->sc_autoinc;

	return 1;
}

void
setstatclockrate(int newhz)
{
}

static u_int
gtmr_get_timecount(struct timecounter *tc)
{
	struct gtmr_softc * const sc = tc->tc_priv;

	arm_isb();	// we want the time NOW, not some instructions later.
	return (u_int) gtmr_cntvct_stable_read(sc);
}