/* $NetBSD: gtmr.c,v 1.50 2025/01/09 06:55:25 rin Exp $ */

/*-
 * Copyright (c) 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: gtmr.c,v 1.50 2025/01/09 06:55:25 rin Exp $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/percpu.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/timetc.h>
#include <sys/cpu.h>

#include <prop/proplib.h>

#include <arm/locore.h>
#include <arm/cpufunc.h>

#include <arm/cortex/gtmr_var.h>
#include <arm/cortex/mpcore_var.h>

static int gtmr_match(device_t, cfdata_t, void *);
static void gtmr_attach(device_t, device_t, void *);

static u_int gtmr_get_timecount(struct timecounter *);

static uint64_t gtmr_read_cntct(struct gtmr_softc *);
static uint32_t gtmr_read_ctl(struct gtmr_softc *);
static void gtmr_write_ctl(struct gtmr_softc *, uint32_t);
static void gtmr_write_tval(struct gtmr_softc *, uint32_t);
static void gtmr_write_cval(struct gtmr_softc *, uint64_t);

static struct gtmr_softc gtmr_sc;

struct gtmr_percpu {
	uint32_t pc_delta;
};

static struct timecounter gtmr_timecounter = {
	.tc_get_timecount = gtmr_get_timecount,
	.tc_poll_pps = 0,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,		/* set by cpu_initclocks() */
	.tc_name = NULL,		/* set by attach */
	.tc_quality = 500,
	.tc_priv = &gtmr_sc,
	.tc_next = NULL,
};

CFATTACH_DECL_NEW(armgtmr, 0, gtmr_match, gtmr_attach, NULL, NULL);

/* ARGSUSED */
static int
gtmr_match(device_t parent, cfdata_t cf, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;

	if (gtmr_sc.sc_dev != NULL)
		return 0;

	/* Generic Timer is always implemented in ARMv8-A */
	if (!cpu_gtmr_exists_p())
		return 0;

	if (strcmp(mpcaa->mpcaa_name, cf->cf_name) != 0)
		return 0;

	return 1;
}

static void
gtmr_attach(device_t parent, device_t self, void *aux)
{
	struct mpcore_attach_args * const mpcaa = aux;
	struct gtmr_softc *sc = &gtmr_sc;
	prop_dictionary_t dict = device_properties(self);
	prop_dictionary_t pdict = device_properties(device_parent(self));
	char freqbuf[sizeof("X.XXX SHz")];
	bool flag;

	/*
	 * This runs at a fixed frequency of 1 to 50MHz.
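	 * The rate comes from the "frequency" device property when one
	 * is supplied, otherwise it is read back from CNTFRQ below.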
	 */
	if (!prop_dictionary_get_uint32(dict, "frequency", &sc->sc_freq))
		sc->sc_freq = gtmr_cntfrq_read();

	if (!prop_dictionary_get_bool(dict, "physical", &sc->sc_physical))
		prop_dictionary_get_bool(pdict, "physical", &sc->sc_physical);

	KASSERT(sc->sc_freq != 0);

	humanize_number(freqbuf, sizeof(freqbuf), sc->sc_freq, "Hz", 1000);

	aprint_naive("\n");
	aprint_normal(": Generic Timer (%s, %s)\n", freqbuf,
	    sc->sc_physical ? "physical" : "virtual");

#if defined(__arm__)
	if (prop_dictionary_get_bool(dict, "arm,cpu-registers-not-fw-configured", &flag) && flag) {
		sc->sc_flags |= GTMR_FLAG_CPU_REGISTERS_NOT_FW_CONFIGURED;
		aprint_debug_dev(self, "CPU registers not initialized by firmware\n");
	}
#endif

	if (prop_dictionary_get_bool(dict, "sun50i-a64-unstable-timer", &flag) && flag) {
		sc->sc_flags |= GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER;
		aprint_debug_dev(self, "enabling Allwinner A64 timer workaround\n");
	}

	device_set_private(self, sc);
	sc->sc_dev = self;

#ifdef DIAGNOSTIC
	sc->sc_percpu = percpu_alloc(sizeof(struct gtmr_percpu));
#endif

	evcnt_attach_dynamic(&sc->sc_ev_missing_ticks, EVCNT_TYPE_MISC, NULL,
	    device_xname(self), "missing interrupts");

	if (mpcaa->mpcaa_irq != -1) {
		sc->sc_global_ih = intr_establish(mpcaa->mpcaa_irq, IPL_CLOCK,
		    IST_LEVEL | IST_MPSAFE, gtmr_intr, NULL);
		if (sc->sc_global_ih == NULL)
			panic("%s: unable to register timer interrupt", __func__);
		aprint_normal_dev(self, "interrupting on irq %d\n",
		    mpcaa->mpcaa_irq);
	}

	const uint32_t cnt_frq = gtmr_cntfrq_read();
	if (cnt_frq == 0) {
		aprint_verbose_dev(self, "cp15 CNT_FRQ not set\n");
	} else if (cnt_frq != sc->sc_freq) {
		aprint_verbose_dev(self,
		    "cp15 CNT_FRQ (%u) differs from supplied frequency\n",
		    cnt_frq);
	}

	gtmr_timecounter.tc_name = device_xname(sc->sc_dev);
	gtmr_timecounter.tc_frequency = sc->sc_freq;
	gtmr_timecounter.tc_priv = sc;

	tc_init(&gtmr_timecounter);

	/* Disable the timer until we are ready */
	gtmr_write_ctl(sc, 0);
}

static uint64_t
gtmr_read_cntct(struct gtmr_softc *sc)
{
	isb();

	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		/*
		 * The Allwinner A64 SoC has an unstable architectural timer.
		 * To work around this problem, ignore reads where the lower
		 * 10 bits are all 0s or 1s.
		 */
		uint64_t val;
		u_int bits;
		do {
			val = sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
			bits = val & __BITS(9,0);
		} while (bits == 0 || bits == __BITS(9,0));
		return val;
	}

	return sc->sc_physical ? gtmr_cntpct_read() : gtmr_cntvct_read();
}

static uint32_t
gtmr_read_ctl(struct gtmr_softc *sc)
{
	isb();

	if (sc->sc_physical)
		return gtmr_cntp_ctl_read();
	else
		return gtmr_cntv_ctl_read();
}

static void
gtmr_write_ctl(struct gtmr_softc *sc, uint32_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_ctl_write(val);
	else
		gtmr_cntv_ctl_write(val);

	isb();
}

static void
gtmr_write_tval(struct gtmr_softc *sc, uint32_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_tval_write(val);
	else
		gtmr_cntv_tval_write(val);

	isb();
}

static void
gtmr_write_cval(struct gtmr_softc *sc, uint64_t val)
{
	if (sc->sc_physical)
		gtmr_cntp_cval_write(val);
	else
		gtmr_cntv_cval_write(val);

	isb();
}

void
gtmr_init_cpu_clock(struct cpu_info *ci)
{
	struct gtmr_softc * const sc = &gtmr_sc;
	uint32_t cntk;
	uint64_t ctl;

	KASSERT(ci == curcpu());

	/* XXX hmm... called from cpu_hatch which hasn't lowered ipl yet */
	int s = splsched();

#if defined(__arm__)
	if ((sc->sc_flags & GTMR_FLAG_CPU_REGISTERS_NOT_FW_CONFIGURED) != 0) {
		armreg_cnt_frq_write(sc->sc_freq);
	}
#endif

	/*
	 * Allow the virtual and physical counters to be accessed from
	 * usermode. (PL0)
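	 * Only the counter that matches sc_physical is exposed; PL0
	 * access to the timer registers and the event stream stays
	 * disabled.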
	 */
	cntk = gtmr_cntk_ctl_read();
	cntk &= ~(CNTKCTL_PL0PTEN | CNTKCTL_PL0VTEN | CNTKCTL_EVNTEN);
	if (sc->sc_physical) {
		cntk |= CNTKCTL_PL0PCTEN;
		cntk &= ~CNTKCTL_PL0VCTEN;
	} else {
		cntk |= CNTKCTL_PL0VCTEN;
		cntk &= ~CNTKCTL_PL0PCTEN;
	}
	gtmr_cntk_ctl_write(cntk);
	isb();

	/*
	 * Enable the timer and stop masking the timer interrupt.
	 */
	ctl = gtmr_read_ctl(sc);
	ctl &= ~CNTCTL_IMASK;
	ctl |= CNTCTL_ENABLE;
	gtmr_write_ctl(sc, ctl);

	/*
	 * Get now and update the compare timer.
	 */
	ci->ci_lastintr = gtmr_read_cntct(sc);
	gtmr_write_tval(sc, sc->sc_autoinc);

	splx(s);

	KASSERT(gtmr_read_cntct(sc) != 0);
}

void
gtmr_cpu_initclocks(void)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc->sc_dev != NULL);
	KASSERT(sc->sc_freq != 0);

	sc->sc_autoinc = sc->sc_freq / hz;

	gtmr_init_cpu_clock(curcpu());
}

void
gtmr_delay(unsigned int n)
{
	struct gtmr_softc * const sc = &gtmr_sc;

	KASSERT(sc != NULL);

	uint32_t freq = sc->sc_freq ? sc->sc_freq : gtmr_cntfrq_read();
	KASSERT(freq != 0);

	const unsigned int incr_per_us = howmany(freq, 1000000);
	int64_t ticks = (int64_t)n * incr_per_us;

	uint64_t last = gtmr_read_cntct(sc);

	while (ticks > 0) {
		SPINLOCK_BACKOFF_HOOK;
		uint64_t curr = gtmr_read_cntct(sc);
		if (curr >= last)
			ticks -= (curr - last);
		else
			ticks -= (UINT64_MAX - curr + last);
		last = curr;
	}
}

/*
 * gtmr_intr:
 *
 *	Handle the hardclock interrupt.
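 *
 *	The next tick is programmed relative to when this one should
 *	have fired, so interrupt latency within a tick is absorbed
 *	instead of accumulating as long-term drift.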
 */
int
gtmr_intr(void *arg)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe * const cf = arg;
	struct gtmr_softc * const sc = &gtmr_sc;

	const uint32_t ctl = gtmr_read_ctl(sc);
	if ((ctl & (CNTCTL_ENABLE|CNTCTL_ISTATUS)) != (CNTCTL_ENABLE|CNTCTL_ISTATUS)) {
		aprint_debug_dev(ci->ci_dev, "spurious timer interrupt (ctl=%#x)\n", ctl);
		return 0;
	}

	const uint64_t now = gtmr_read_cntct(sc);
	uint64_t delta = now - ci->ci_lastintr;

#ifdef DIAGNOSTIC
	struct gtmr_percpu *pc = NULL;
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		const uint64_t then = sc->sc_physical ? gtmr_cntp_cval_read() : gtmr_cntv_cval_read();
		pc = percpu_getref(sc->sc_percpu);
		KASSERTMSG(then <= now, "%"PRId64, now - then);
		KASSERTMSG(then + pc->pc_delta >= ci->ci_lastintr + sc->sc_autoinc,
		    "%"PRId64, then + pc->pc_delta - ci->ci_lastintr - sc->sc_autoinc);
	}
#endif

	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERTMSG(delta > sc->sc_autoinc / 100,
		    "%s: interrupting too quickly (delta=%"PRIu64") autoinc=%lu",
		    ci->ci_data.cpu_name, delta, sc->sc_autoinc);
	}

	/*
	 * If we got interrupted too soon (delta < sc->sc_autoinc)
	 * or we missed (or almost missed) a tick
	 * (delta >= 7 * sc->sc_autoinc / 4), don't try to adjust for jitter.
	 */
	if (delta >= sc->sc_autoinc && delta <= 7 * sc->sc_autoinc / 4) {
		delta -= sc->sc_autoinc;
	} else {
		delta = 0;
	}

	isb();
	if (ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		gtmr_write_cval(sc, now + sc->sc_autoinc - delta);
	} else {
		gtmr_write_tval(sc, sc->sc_autoinc - delta);
	}

	ci->ci_lastintr = now;

#ifdef DIAGNOSTIC
	if (!ISSET(sc->sc_flags, GTMR_FLAG_SUN50I_A64_UNSTABLE_TIMER)) {
		KASSERT(delta == (uint32_t) delta);
		pc->pc_delta = delta;
		percpu_putref(sc->sc_percpu);
	}
#endif

	hardclock(cf);

	sc->sc_ev_missing_ticks.ev_count += delta / sc->sc_autoinc;

	return 1;
}

void
setstatclockrate(int newhz)
{
}

static u_int
gtmr_get_timecount(struct timecounter *tc)
{
	struct gtmr_softc * const sc = tc->tc_priv;

	return (u_int) gtmr_read_cntct(sc);
}