/* $NetBSD: cpu_subr.c,v 1.5 2024/05/03 07:24:31 skrll Exp $ */

/*-
 * Copyright (c) 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"
#include "opt_multiprocessor.h"
#include "opt_riscv_debug.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: cpu_subr.c,v 1.5 2024/05/03 07:24:31 skrll Exp $");

#include <sys/param.h>
#include <sys/atomic.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/reboot.h>
#include <sys/xcall.h>

#include <machine/db_machdep.h>
#include <machine/sbi.h>

#ifdef DDB
#include <ddb/db_output.h>
#endif

#ifdef VERBOSE_INIT_RISCV
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

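/*
 * Translates a hart ID into the corresponding cpu index; entries stay
 * ~0U until the matching CPU is known.
 */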
unsigned int cpu_hartindex[MAXCPUS] = {
	[0 ... MAXCPUS - 1] = ~0U,
};

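/* Hart ID of the boot processor (BP); ~0UL until it is known. */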
cpuid_t cpu_bphartid = ~0UL;

#ifdef MULTIPROCESSOR

kcpuset_t *cpus_halted;
kcpuset_t *cpus_hatched;
kcpuset_t *cpus_paused;
kcpuset_t *cpus_resumed;
kcpuset_t *cpus_running;

#define CPUINDEX_DIVISOR	(sizeof(u_long) * NBBY)

#define N	howmany(MAXCPUS, CPUINDEX_DIVISOR)

/* cpu_hatch_ipi needs fixing for > 1 */
CTASSERT(N == 1);
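/*
 * One bit per cpu index: riscv_cpu_hatched collects APs that have
 * checked in via cpu_set_hatched(); riscv_cpu_mbox is the "go" mailbox
 * that releases them from their spin loop in locore.S.
 */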
volatile u_long riscv_cpu_hatched[N] __cacheline_aligned = { };
volatile u_long riscv_cpu_mbox[N] __cacheline_aligned = { };

/* IPI all APs to GO! */
static void
cpu_ipi_aps(void)
{
	unsigned long hartmask = 0;

	// BP is index 0
	for (size_t i = 1; i < ncpu; i++) {
		const struct cpu_info * const ci = &cpu_info_store[i];
		const cpuid_t hartid = ci->ci_cpuid;
		KASSERT(hartid < sizeof(unsigned long) * NBBY);
		hartmask |= __BIT(hartid);
	}
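	/*
	 * sbi_send_ipi() takes a hart bitmask plus a base hart ID; with a
	 * base of 0, bit N of hartmask selects hart N.
	 */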
	struct sbiret sbiret = sbi_send_ipi(hartmask, 0);

	KASSERT(sbiret.error == SBI_SUCCESS);
}

void
cpu_boot_secondary_processors(void)
{
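	/*
	 * The machine-dependent boot flag RB_MD1 requests that the
	 * secondary harts be left parked in their spin loops.
	 */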
	if ((boothowto & RB_MD1) != 0)
		return;

	VPRINTF("%s: starting secondary processors\n", __func__);

	/*
	 * send mbox to have secondary processors do cpu_hatch()
	 * store-release matches locore.S
	 */
	asm volatile("fence rw,w");
	for (size_t n = 0; n < __arraycount(riscv_cpu_mbox); n++)
		atomic_or_ulong(&riscv_cpu_mbox[n], riscv_cpu_hatched[n]);
	cpu_ipi_aps();

	/* wait for all cpus to have done cpu_hatch() */
	for (u_int cpuindex = 1; cpuindex < ncpu; cpuindex++) {
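		/*
		 * Only wait for harts that actually checked in; one that
		 * never hatched is skipped rather than spun on forever.
		 */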
		if (!cpu_hatched_p(cpuindex))
			continue;

		const size_t off = cpuindex / CPUINDEX_DIVISOR;
		const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);

		/* load-acquire matches cpu_clr_mbox */
		while (atomic_load_acquire(&riscv_cpu_mbox[off]) & bit) {
			/* spin - it shouldn't be long */
			;
		}
		struct cpu_info *ci = &cpu_info_store[cpuindex];
		atomic_or_ulong(&ci->ci_flags, CPUF_RUNNING);
		kcpuset_set(cpus_running, cpu_index(ci));
	}

	VPRINTF("%s: secondary processors hatched. %d running\n", __func__,
	    kcpuset_countset(cpus_running));
}

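/*
 * Has the cpu at the given index hatched, i.e. announced itself via
 * cpu_set_hatched()?
 */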
bool
cpu_hatched_p(u_int cpuindex)
{
	const u_int off = cpuindex / CPUINDEX_DIVISOR;
	const u_int bit = cpuindex % CPUINDEX_DIVISOR;

	/* load-acquire matches cpu_set_hatched */
	return (atomic_load_acquire(&riscv_cpu_hatched[off]) & __BIT(bit)) != 0;
}

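/*
 * Mark the cpu at the given index as hatched; the BP polls this via
 * cpu_hatched_p().
 */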
void
cpu_set_hatched(u_int cpuindex)
{

	const size_t off = cpuindex / CPUINDEX_DIVISOR;
	const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);

	/* store-release matches cpu_hatched_p */
	asm volatile("fence rw, w" ::: "memory");
	atomic_or_ulong(&riscv_cpu_hatched[off], bit);

	asm volatile("fence w, rw" ::: "memory");
}

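/*
 * Clear this cpu's "go" mailbox bit, acknowledging the BP's release;
 * cpu_boot_secondary_processors() spins until the bit goes away.
 */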
void
cpu_clr_mbox(u_int cpuindex)
{

	const size_t off = cpuindex / CPUINDEX_DIVISOR;
	const u_long bit = __BIT(cpuindex % CPUINDEX_DIVISOR);

	/* store-release matches locore.S */
	asm volatile("fence rw,w" ::: "memory");
	atomic_and_ulong(&riscv_cpu_mbox[off], ~bit);

	asm volatile("fence w, rw" ::: "memory");
}

void
cpu_broadcast_ipi(int tag)
{

	/*
	 * No reason to remove ourselves since multicast_ipi will do that
	 * for us.
	 */
	cpu_multicast_ipi(cpus_running, tag);
}

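/*
 * Send an IPI with the given tag to every cpu in the set except the
 * caller, using the per-cpu ci_multicastcpus set as scratch space.
 */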
void
cpu_multicast_ipi(const kcpuset_t *kcp, int tag)
{
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp2 = ci->ci_multicastcpus;

	if (kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_copy(kcp2, kcp);
	kcpuset_remove(kcp2, ci->ci_data.cpu_kcpuset);
	for (unsigned int cii; (cii = kcpuset_ffs(kcp2)) != 0; ) {
		kcpuset_clear(kcp2, --cii);
		(void)cpu_send_ipi(cpu_lookup(cii), tag);
	}
}

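/*
 * Spin for a bounded number of iterations waiting for every cpu in
 * "wanted" to show up in "watchset"; report any stragglers.
 */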
static void
cpu_ipi_wait(const char *s, const kcpuset_t *watchset, const kcpuset_t *wanted)
{
	bool done = false;
	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_watchcpus;

	/* some finite amount of time */
	for (u_long limit = ci->ci_cpu_freq /* / 10 */; !done && limit--; ) {
		kcpuset_copy(kcp, watchset);
		kcpuset_intersect(kcp, wanted);
		done = kcpuset_match(kcp, wanted);
	}

	if (!done) {
		cpuid_t cii;
		kcpuset_copy(kcp, wanted);
		kcpuset_remove(kcp, watchset);
		if ((cii = kcpuset_ffs(kcp)) != 0) {
			printf("Failed to %s:", s);
			do {
				kcpuset_clear(kcp, --cii);
				printf(" cpu%lu", cii);
			} while ((cii = kcpuset_ffs(kcp)) != 0);
			printf("\n");
		}
	}
}

/*
 * Halt this cpu
 */
void
cpu_halt(void)
{
	cpuid_t cii = cpu_index(curcpu());

	printf("cpu%lu: shutting down\n", cii);
	kcpuset_atomic_set(cpus_halted, cii);
	spl0();		/* allow interrupts e.g. further ipi ? */
	for (;;) ;	/* spin */

	/* NOTREACHED */
}

/*
 * Halt all running cpus, excluding current cpu.
 */
void
cpu_halt_others(void)
{
	kcpuset_t *kcp;

	// If we are the only CPU running, there's nothing to do.
	if (kcpuset_match(cpus_running, curcpu()->ci_data.cpu_kcpuset))
		return;

	// Get all running CPUs
	kcpuset_clone(&kcp, cpus_running);
	// Remove ourself
	kcpuset_remove(kcp, curcpu()->ci_data.cpu_kcpuset);
	// Remove any halted CPUs
	kcpuset_remove(kcp, cpus_halted);
	// If there are CPUs left, send the IPIs
	if (!kcpuset_iszero(kcp)) {
		cpu_multicast_ipi(kcp, IPI_HALT);
		cpu_ipi_wait("halt", cpus_halted, kcp);
	}
	kcpuset_destroy(kcp);

	/*
	 * TBD
	 * Depending on available firmware methods, other cpus will
	 * either shut down themselves, or spin and wait for us to
	 * stop them.
	 */
}

/*
 * Pause this cpu
 */
void
cpu_pause(void)
{
	const int s = splhigh();
	cpuid_t cii = cpu_index(curcpu());

	if (__predict_false(cold)) {
		splx(s);
		return;
	}

	do {
		kcpuset_atomic_set(cpus_paused, cii);
		do {
			;
		} while (kcpuset_isset(cpus_paused, cii));
		kcpuset_atomic_set(cpus_resumed, cii);
#if defined(DDB)
		if (ddb_running_on_this_cpu_p())
			cpu_Debugger();
		if (ddb_running_on_any_cpu_p())
			continue;
#endif
	} while (false);

	splx(s);
}

/*
 * Pause all running cpus, excluding current cpu.
 */
void
cpu_pause_others(void)
{
	struct cpu_info * const ci = curcpu();

	if (cold || kcpuset_match(cpus_running, ci->ci_data.cpu_kcpuset))
		return;

	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_copy(kcp, cpus_running);
	kcpuset_remove(kcp, ci->ci_data.cpu_kcpuset);
	kcpuset_remove(kcp, cpus_paused);

	cpu_broadcast_ipi(IPI_SUSPEND);
	cpu_ipi_wait("pause", cpus_paused, kcp);
}

/*
 * Resume a single cpu
 */
void
cpu_resume(cpuid_t cii)
{

	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_zero(kcp);
	kcpuset_set(kcp, cii);
	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	kcpuset_atomic_clear(cpus_paused, cii);

	cpu_ipi_wait("resume", cpus_resumed, kcp);
}

/*
 * Resume all paused cpus.
 */
void
cpu_resume_others(void)
{

	if (__predict_false(cold))
		return;

	struct cpu_info * const ci = curcpu();
	kcpuset_t *kcp = ci->ci_ddbcpus;

	kcpuset_atomicly_remove(cpus_resumed, cpus_resumed);
	kcpuset_copy(kcp, cpus_paused);
	kcpuset_atomicly_remove(cpus_paused, cpus_paused);

	/* CPUs awake on cpus_paused clear */
	cpu_ipi_wait("resume", cpus_resumed, kcp);
}

bool
cpu_is_paused(cpuid_t cii)
{

	return !cold && kcpuset_isset(cpus_paused, cii);
}

#ifdef DDB
void
cpu_debug_dump(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	char running, hatched, paused, resumed, halted;
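
	/*
	 * Flag legend: H hatched, R running, P paused, r resumed, h halted;
	 * '<' marks the current cpu, '*' the primary.
	 */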
	db_printf("CPU CPUID STATE CPUINFO CPL INT MTX IPIS(A/R)\n");
	for (CPU_INFO_FOREACH(cii, ci)) {
		hatched = (kcpuset_isset(cpus_hatched, cpu_index(ci)) ? 'H' : '-');
		running = (kcpuset_isset(cpus_running, cpu_index(ci)) ? 'R' : '-');
		paused = (kcpuset_isset(cpus_paused, cpu_index(ci)) ? 'P' : '-');
		resumed = (kcpuset_isset(cpus_resumed, cpu_index(ci)) ? 'r' : '-');
		halted = (kcpuset_isset(cpus_halted, cpu_index(ci)) ? 'h' : '-');
		db_printf("%3d 0x%03lx%c%c%c%c%c%c%c %p "
		    "%3d %3d %3d 0x%02lx/0x%02lx\n",
		    cpu_index(ci), ci->ci_cpuid,
		    ci == curcpu() ? '<' : ' ',
		    CPU_IS_PRIMARY(ci) ? '*' : ' ',
		    hatched, running, paused, resumed, halted,
		    ci, ci->ci_cpl, ci->ci_intr_depth, ci->ci_mtx_count,
		    ci->ci_active_ipis, ci->ci_request_ipis);
	}
}
#endif

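/*
 * MD hooks for the MI cross-call (xcall) and generic IPI interfaces;
 * both simply forward to cpu_send_ipi() with the matching tag.
 */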
void
xc_send_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	cpu_send_ipi(ci, IPI_XCALL);
}

void
cpu_ipi(struct cpu_info *ci)
{
	KASSERT(kpreempt_disabled());
	KASSERT(curcpu() != ci);

	cpu_send_ipi(ci, IPI_GENERIC);
}

#endif