/*	$OpenBSD: clock.c,v 1.39 2023/09/17 14:50:51 cheloha Exp $	*/

/*
 * Copyright (c) 1998-2003 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/clockintr.h>
#include <sys/stdint.h>
#include <sys/timetc.h>

#include <dev/clock_subr.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/psl.h>
#include <machine/intr.h>
#include <machine/reg.h>
#include <machine/cpufunc.h>
#include <machine/autoconf.h>

uint64_t itmr_nsec_cycle_ratio;
uint64_t itmr_nsec_max;

u_int	itmr_get_timecount(struct timecounter *);
int	itmr_intr(void *);
void	itmr_rearm(void *, uint64_t);
void	itmr_trigger(void);
void	itmr_trigger_masked(void);
void	itmr_trigger_wrapper(void *);

struct timecounter itmr_timecounter = {
	.tc_get_timecount = itmr_get_timecount,
	.tc_counter_mask = 0xffffffff,
	.tc_frequency = 0,
	.tc_name = "itmr",
	.tc_quality = 0,
	.tc_priv = NULL,
	.tc_user = 0,
};

const struct intrclock itmr_intrclock = {
	.ic_rearm = itmr_rearm,
	.ic_trigger = itmr_trigger_wrapper
};

extern todr_chip_handle_t todr_handle;
struct todr_chip_handle pdc_todr;

int
pdc_gettime(struct todr_chip_handle *handle, struct timeval *tv)
{
	struct pdc_tod tod PDC_ALIGNMENT;
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_READ,
	    &tod, 0, 0, 0, 0, 0))) {
		printf("clock: failed to fetch (%d)\n", error);
		return EIO;
	}

	tv->tv_sec = tod.sec;
	tv->tv_usec = tod.usec;
	return 0;
}

int
pdc_settime(struct todr_chip_handle *handle, struct timeval *tv)
{
	int error;

	if ((error = pdc_call((iodcio_t)pdc, 1, PDC_TOD, PDC_TOD_WRITE,
	    tv->tv_sec, tv->tv_usec))) {
		printf("clock: failed to save (%d)\n", error);
		return EIO;
	}

	return 0;
}

void
cpu_initclocks(void)
{
	uint64_t itmr_freq = PAGE0->mem_10msec * 100;

	pdc_todr.todr_gettime = pdc_gettime;
	pdc_todr.todr_settime = pdc_settime;
	todr_handle = &pdc_todr;

	itmr_timecounter.tc_frequency = itmr_freq;
	tc_init(&itmr_timecounter);

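	/*
	 * The statistics and profiling clocks are driven from the same
	 * interval timer; statclock_is_randomized tells the clockintr
	 * layer to randomize the statclock period.
	 */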
	stathz = hz;
	profhz = stathz * 10;
	statclock_is_randomized = 1;

	/*
	 * itmr_nsec_cycle_ratio is the number of ITMR cycles per
	 * nanosecond as a 32.32 fixed-point fraction.  itmr_nsec_max is
	 * the largest nanosecond count whose conversion to cycles in
	 * itmr_rearm() cannot overflow 64 bits.
	 */
	itmr_nsec_cycle_ratio = itmr_freq * (1ULL << 32) / 1000000000;
	itmr_nsec_max = UINT64_MAX / itmr_nsec_cycle_ratio;
}

void
cpu_startclock(void)
{
	clockintr_cpu_init(&itmr_intrclock);
	clockintr_trigger();
}

int
itmr_intr(void *v)
{
	clockintr_dispatch(v);
	return (1);
}

void
setstatclockrate(int newhz)
{
}

u_int
itmr_get_timecount(struct timecounter *tc)
{
	u_long __itmr;

	mfctl(CR_ITMR, __itmr);
	return (__itmr);
}

/*
 * Program the next clock interrupt, making sure it will
 * indeed happen in the future.  This is done with interrupts
 * disabled to avoid a possible race.
 */
void
itmr_rearm(void *unused, uint64_t nsecs)
{
	uint32_t cycles, t0, t1;
	register_t eiem, eirr;

	if (nsecs > itmr_nsec_max)
		nsecs = itmr_nsec_max;
	cycles = (nsecs * itmr_nsec_cycle_ratio) >> 32;

	eiem = hppa_intr_disable();
	mfctl(CR_ITMR, t0);
	mtctl(t0 + cycles, CR_ITMR);
	mfctl(CR_ITMR, t1);
	mfctl(CR_EIRR, eirr);

	/*
	 * If at least "cycles" ITMR ticks have elapsed and the interrupt
	 * isn't pending, we missed.  Fall back to itmr_trigger_masked().
	 */
	if (cycles <= t1 - t0) {
		if (!ISSET(eirr, 1U << 31))
			itmr_trigger_masked();
	}
	hppa_intr_enable(eiem);
}

void
itmr_trigger(void)
{
	register_t eiem;

	eiem = hppa_intr_disable();
	itmr_trigger_masked();
	hppa_intr_enable(eiem);
}

/* Trigger our own ITMR interrupt by setting EIR{0}. */
void
itmr_trigger_masked(void)
{
	struct iomod *cpu = (struct iomod *)curcpu()->ci_hpa;

	cpu->io_eir = 0;
	__asm volatile ("sync" ::: "memory");
}

void
itmr_trigger_wrapper(void *unused)
{
	itmr_trigger();
}