/* $NetBSD: kern_cctr.c,v 1.14 2023/10/05 12:05:59 riastradh Exp $ */

/*-
 * Copyright (c) 2020 Jason R. Thorpe
 * Copyright (c) 2018 Naruaki Etomi
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Most of the following was adapted from the Linux/ia64 cycle counter
 * synchronization algorithm:
 *
 *	IA-64 Linux Kernel: Design and Implementation p356-p361
 *	(Hewlett-Packard Professional Books)
 *
 * Here's a rough description of how it works.
 *
 * The primary CPU's cycle counter is the reference monotonic counter.
 * Each secondary CPU is responsible for knowing the offset of its own
 * cycle counter relative to the primary's.  When the time counter is
 * read, the CC value is adjusted by this delta.
 *
 * Calibration happens periodically, and works like this:
 *
 *	Secondary CPU				Primary CPU
 *	  Send IPI to publish reference CC
 *		    --------->
 *						Indicate Primary Ready
 *		<----------------------------
 *	  T0 = local CC
 *	  Indicate Secondary Ready
 *		    ----------------->
 *	  (assume this happens at Tavg)		Publish reference CC
 *						Indicate completion
 *		<------------------------
 *	  Notice completion
 *	  T1 = local CC
 *
 *	Tavg = (T0 + T1) / 2
 *
 *	Delta = Tavg - Published primary CC value
 *
 * "Notice completion" is performed by waiting for the primary to set
 * the calibration state to FINISHED.  This is a little unfortunate,
 * because T0->Tavg involves a single store-release on the secondary, and
 * Tavg->T1 involves a store-relaxed and a store-release.  It would be
 * better to simply wait for the reference CC to transition from 0 to
 * non-0 (i.e. just wait for a single store-release from Tavg->T1), but
 * if the cycle counter just happened to read back as 0 at that instant,
 * we would never break out of the loop.
 *
 * We trigger calibration roughly once a second; the period is actually
 * skewed based on the CPU index in order to avoid lock contention.  The
 * calibration interval does not need to be precise, and so this is fine.
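 *
 * Worked example (illustrative numbers only): suppose the secondary
 * samples T0 = 1000 and T1 = 1040 around the handshake, so
 * Tavg = 1020.  If the primary published a reference CC of 1100 at
 * (approximately) that instant, Delta = 1020 - 1100 = -80, and the
 * secondary's cc_get_timecount() thereafter returns
 * local CC - Delta = local CC + 80, matching the primary's counter.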
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cctr.c,v 1.14 2023/10/05 12:05:59 riastradh Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/timepps.h>
#include <sys/time.h>
#include <sys/timetc.h>
#include <sys/kernel.h>
#include <sys/power.h>
#include <sys/cpu.h>
#include <machine/cpu_counter.h>

/* XXX make cc_timecounter.tc_frequency settable by sysctl() */

#if defined(MULTIPROCESSOR)
static uint32_t cc_primary __cacheline_aligned;
static uint32_t cc_calibration_state __cacheline_aligned;
static kmutex_t cc_calibration_lock __cacheline_aligned;

#define	CC_CAL_START		0	/* initial state */
#define	CC_CAL_PRIMARY_READY	1	/* primary CPU ready to respond */
#define	CC_CAL_SECONDARY_READY	2	/* secondary CPU ready to receive */
#define	CC_CAL_FINISHED		3	/* calibration attempt complete */
#endif /* MULTIPROCESSOR */

static struct timecounter cc_timecounter = {
	.tc_get_timecount = cc_get_timecount,
	.tc_poll_pps = NULL,
	.tc_counter_mask = ~0u,
	.tc_frequency = 0,
	.tc_name = "unknown cycle counter",
	/*
	 * Don't pick the cycle counter automatically: frequency
	 * changes (e.g. CPU speed scaling) might affect it.
	 */
	.tc_quality = -100000,

	.tc_priv = NULL,
	.tc_next = NULL
};

/*
 * Initialize cycle counter based timecounter.  This must be done on the
 * primary CPU.
 */
struct timecounter *
cc_init(timecounter_get_t getcc, uint64_t freq, const char *name, int quality)
{
	static bool cc_init_done __diagused;
	struct cpu_info * const ci = curcpu();

	KASSERT(!cc_init_done);
	KASSERT(cold);
	KASSERT(CPU_IS_PRIMARY(ci));

#if defined(MULTIPROCESSOR)
	mutex_init(&cc_calibration_lock, MUTEX_DEFAULT, IPL_HIGH);
#endif

	cc_init_done = true;

	ci->ci_cc.cc_delta = 0;
	ci->ci_cc.cc_ticks = 0;
	ci->ci_cc.cc_cal_ticks = 0;

	if (getcc != NULL)
		cc_timecounter.tc_get_timecount = getcc;

	cc_timecounter.tc_frequency = freq;
	cc_timecounter.tc_name = name;
	cc_timecounter.tc_quality = quality;
	tc_init(&cc_timecounter);

	return &cc_timecounter;
}
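
/*
 * Illustrative MD usage (a sketch only; the hook name below is
 * hypothetical, and the frequency source varies by port):
 *
 *	void
 *	cpu_initclocks_md(void)
 *	{
 *		...
 *		cc_init(NULL, cpu_frequency(curcpu()),
 *		    "cycle counter", 100);
 *		...
 *	}
 */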

/*
 * Initialize cycle counter timecounter calibration data on a secondary
 * CPU.  Must be called on that secondary CPU.
 */
void
cc_init_secondary(struct cpu_info * const ci)
{
	KASSERT(!CPU_IS_PRIMARY(curcpu()));
	KASSERT(ci == curcpu());

	ci->ci_cc.cc_ticks = 0;

	/*
	 * It's not critical that calibration be performed in
	 * precise intervals, so skew when calibration is done
	 * on each secondary CPU based on its CPU index to
	 * avoid contending on the calibration lock.
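	 * For example, with hz = 100, the CPU at cpu_index 3 would
	 * recalibrate every 97 ticks.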
	 */
	ci->ci_cc.cc_cal_ticks = hz - cpu_index(ci);
	KASSERT(ci->ci_cc.cc_cal_ticks);

	cc_calibrate_cpu(ci);
}

/*
 * Pick up the tick count, adjusted by this CPU's delta so that it
 * tracks the reference (primary CPU) tick count.
 */
u_int
cc_get_timecount(struct timecounter *tc)
{
#if defined(MULTIPROCESSOR)
	int64_t rcc;
	long pctr;

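	/*
	 * lwp_pctr() changes whenever this LWP is preempted or
	 * migrated to another CPU; if it changed across the read,
	 * the counter value and the delta may have come from
	 * different CPUs, so retry.
	 */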
	do {
		pctr = lwp_pctr();
		/* N.B. the delta is always 0 on the primary. */
		rcc = cpu_counter32() - curcpu()->ci_cc.cc_delta;
	} while (pctr != lwp_pctr());

	return rcc;
#else
	return cpu_counter32();
#endif /* MULTIPROCESSOR */
}

#if defined(MULTIPROCESSOR)
static inline bool
cc_get_delta(struct cpu_info * const ci)
{
	int64_t t0, t1, tcenter = 0;

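	/*
	 * This is the T0 -> T1 window from the protocol diagram at the
	 * top of the file: sample T0, signal readiness, spin until the
	 * primary has published its reference CC and flagged the
	 * exchange FINISHED, then sample T1.
	 */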
	t0 = cpu_counter32();

	atomic_store_release(&cc_calibration_state, CC_CAL_SECONDARY_READY);

	for (;;) {
		if (atomic_load_acquire(&cc_calibration_state) ==
		    CC_CAL_FINISHED) {
			break;
		}
	}

	t1 = cpu_counter32();

	if (t1 < t0) {
		/* Overflow! */
		return false;
	}

	/* average t0 and t1 without overflow: */
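	/* (if t0 and t1 are both odd, the two shifts each dropped 1/2) */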
	tcenter = (t0 >> 1) + (t1 >> 1);
	if ((t0 & 1) + (t1 & 1) == 2)
		tcenter++;

	ci->ci_cc.cc_delta = tcenter - cc_primary;

	return true;
}
#endif /* MULTIPROCESSOR */

/*
 * Called on secondary CPUs to calibrate their cycle counter offset
 * relative to the primary CPU.
 */
void
cc_calibrate_cpu(struct cpu_info * const ci)
{
#if defined(MULTIPROCESSOR)
	KASSERT(!CPU_IS_PRIMARY(ci));

	mutex_spin_enter(&cc_calibration_lock);

 retry:
	atomic_store_release(&cc_calibration_state, CC_CAL_START);

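	/*
	 * cc_get_primary_cc() is machine-dependent; it is expected to
	 * send the IPI that causes the primary CPU to enter
	 * cc_primary_cc() below.
	 */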
	/* Trigger primary CPU. */
	cc_get_primary_cc();

	for (;;) {
		if (atomic_load_acquire(&cc_calibration_state) ==
		    CC_CAL_PRIMARY_READY) {
			break;
		}
	}

	if (! cc_get_delta(ci)) {
		goto retry;
	}

	mutex_exit(&cc_calibration_lock);
#endif /* MULTIPROCESSOR */
}

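/*
 * Called on the primary CPU (from the IPI sent by a calibrating
 * secondary) to publish the primary's reference cycle counter value.
 */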
void
cc_primary_cc(void)
{
#if defined(MULTIPROCESSOR)
	/* N.B. We expect all interrupts to be blocked. */

	atomic_store_release(&cc_calibration_state, CC_CAL_PRIMARY_READY);

	for (;;) {
		if (atomic_load_acquire(&cc_calibration_state) ==
		    CC_CAL_SECONDARY_READY) {
			break;
		}
	}

	cc_primary = cpu_counter32();
	atomic_store_release(&cc_calibration_state, CC_CAL_FINISHED);
#endif /* MULTIPROCESSOR */
}