/* $NetBSD: ipifuncs.c,v 1.57 2022/05/03 20:52:31 andvar Exp $ */

/*-
 * Copyright (c) 2004 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.57 2022/05/03 20:52:31 andvar Exp $");

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <machine/db_machdep.h>

#include <machine/cpu.h>
#include <machine/cpu_counter.h>
#include <machine/ctlreg.h>
#include <machine/pmap.h>
#include <machine/sparc64.h>

#include <sparc64/sparc64/cache.h>

#if defined(DDB) || defined(KGDB)
#ifdef DDB
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#endif
#endif

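/* How many times to retry a sun4v cpu mondo send before giving up. */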
#define SPARC64_IPI_RETRIES 10000

/* CPU sets containing halted, spinning, paused and resumed cpus */
static volatile sparc64_cpuset_t cpus_halted;
static volatile sparc64_cpuset_t cpus_spinning;
static volatile sparc64_cpuset_t cpus_paused;
static volatile sparc64_cpuset_t cpus_resumed;

/* IPI support routines. */
static int sparc64_ipi_wait(sparc64_cpuset_t volatile *, sparc64_cpuset_t);
static void sparc64_ipi_error(const char *, sparc64_cpuset_t, sparc64_cpuset_t);

/* Send IPI functions for supported platforms */
static void sparc64_send_ipi_sun4u(int, ipifunc_t, uint64_t, uint64_t);
static void sparc64_send_ipi_sun4v(int, ipifunc_t, uint64_t, uint64_t);
void (*sparc64_send_ipi)(int, ipifunc_t, uint64_t, uint64_t) = NULL;

/*
 * These are the "function" entry points in locore.s/mp_subr.s that handle IPIs.
 */
void sparc64_ipi_halt(void *, void *);
void sparc64_ipi_pause(void *, void *);
void sparc64_ipi_flush_pte_us(void *, void *);
void sparc64_ipi_flush_pte_usiii(void *, void *);
void sparc64_ipi_flush_pte_sun4v(void *, void *);
void sparc64_ipi_dcache_flush_page_us(void *, void *);
void sparc64_ipi_dcache_flush_page_usiii(void *, void *);
void sparc64_ipi_dcache_flush_page_sun4v(void *, void *);
void sparc64_ipi_blast_dcache(void *, void *);
void sparc64_ipi_ccall(void *, void *);

/* Function pointers to be set up in sparc64_ipi_init() */
static ipifunc_t smp_tlb_flush_pte_func = NULL;
static ipifunc_t sparc64_ipi_dcache_flush_page_func = NULL;

/*
 * Process cpu stop-self event.
 */
void
sparc64_ipi_halt_thiscpu(void *arg, void *arg2)
{
        extern void prom_printf(const char *fmt, ...);

        printf("cpu%d: shutting down\n", cpu_number());
        if (prom_has_stop_other() || !prom_has_stopself()) {
                /*
                 * just loop here, the final cpu will stop us later
                 */
                CPUSET_ADD(cpus_spinning, cpu_number());
                CPUSET_ADD(cpus_halted, cpu_number());
                spl0();
                while (1)
                        /* nothing */;
        } else {
                CPUSET_ADD(cpus_halted, cpu_number());
                prom_stopself();
        }
}

void
sparc64_do_pause(void)
{
#if defined(DDB)
        extern bool ddb_running_on_this_cpu(void);
        extern void db_resume_others(void);
#endif

        CPUSET_ADD(cpus_paused, cpu_number());

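        /* Spin until whoever paused us removes us from cpus_paused. */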
        do {
                membar_Sync();
        } while (CPUSET_HAS(cpus_paused, cpu_number()));
        membar_Sync();
        CPUSET_ADD(cpus_resumed, cpu_number());

#if defined(DDB)
        if (ddb_running_on_this_cpu()) {
                db_command_loop();
                db_resume_others();
        }
#endif
}

/*
 * Pause cpu.  This is called from locore.s after setting up a trapframe.
 */
void
sparc64_ipi_pause_thiscpu(void *arg)
{
        int s;
#if defined(DDB)
        extern void fill_ddb_regs_from_tf(struct trapframe64 *tf);
        extern void ddb_restore_state(void);

        if (arg)
                fill_ddb_regs_from_tf(arg);
#endif

        s = intr_disable();
        sparc64_do_pause();

#if defined(DDB)
        if (arg) {
                ddb_restore_state();
                curcpu()->ci_ddb_regs = NULL;
        }
#endif

        intr_restore(s);
}

/*
 * Initialize IPI machinery.
 */
void
sparc64_ipi_init(void)
{

        /* Clear all cpu sets. */
        CPUSET_CLEAR(cpus_halted);
        CPUSET_CLEAR(cpus_spinning);
        CPUSET_CLEAR(cpus_paused);
        CPUSET_CLEAR(cpus_resumed);

        /*
         * Prepare cpu type dependent function pointers
         */
        if (CPU_ISSUN4V) {
                smp_tlb_flush_pte_func = sparc64_ipi_flush_pte_sun4v;
                sparc64_ipi_dcache_flush_page_func =
                    sparc64_ipi_dcache_flush_page_sun4v;
        } else if (CPU_IS_USIII_UP()) {
                smp_tlb_flush_pte_func = sparc64_ipi_flush_pte_usiii;
                sparc64_ipi_dcache_flush_page_func =
                    sparc64_ipi_dcache_flush_page_usiii;
        } else {
                smp_tlb_flush_pte_func = sparc64_ipi_flush_pte_us;
                sparc64_ipi_dcache_flush_page_func =
                    sparc64_ipi_dcache_flush_page_us;
        }

        if (CPU_ISSUN4V)
                sparc64_send_ipi = sparc64_send_ipi_sun4v;
        else
                sparc64_send_ipi = sparc64_send_ipi_sun4u;
}

/*
 * Send an IPI to all in the list but ourselves.
 */
void
sparc64_multicast_ipi(sparc64_cpuset_t cpuset, ipifunc_t func, uint64_t arg1,
    uint64_t arg2)
{
        struct cpu_info *ci;

        CPUSET_DEL(cpuset, cpu_number());
        if (CPUSET_EMPTY(cpuset))
                return;

        for (ci = cpus; ci != NULL; ci = ci->ci_next) {
                if (CPUSET_HAS(cpuset, ci->ci_index)) {
                        CPUSET_DEL(cpuset, ci->ci_index);
                        sparc64_send_ipi(ci->ci_cpuid, func, arg1, arg2);
                }
        }
}

/*
 * Broadcast an IPI to all but ourselves.
 */
void
sparc64_broadcast_ipi(ipifunc_t func, uint64_t arg1, uint64_t arg2)
{

        sparc64_multicast_ipi(CPUSET_EXCEPT(cpus_active, cpu_number()), func,
            arg1, arg2);
}

/*
 * Send an interprocessor interrupt - sun4u.
 */
void
sparc64_send_ipi_sun4u(int upaid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
        int i, ik, shift = 0;
        uint64_t intr_func;

        KASSERT(upaid != curcpu()->ci_cpuid);

        /*
         * UltraSPARC-IIIi CPUs select the BUSY/NACK pair based on the
         * lower two bits of the ITID.
         */
        if (CPU_IS_USIIIi())
                shift = (upaid & 0x3) * 2;

        if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
                panic("recursive IPI?");

        intr_func = (uint64_t)(u_long)func;

        /* Schedule an interrupt. */
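        /*
         * The dispatch data registers carry the handler address and its two
         * arguments; writing the target's dispatch command register then
         * triggers the cross-call.
         */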
        for (i = 0; i < 10000; i++) {
                int s = intr_disable();

                stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_func);
                stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, arg1);
                stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, arg2);
                stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);
                membar_Sync();
                /* Workaround for SpitFire erratum #54, from FreeBSD */
                if (CPU_IS_SPITFIRE()) {
                        (void)ldxa(P_DCR_0, ASI_INTERRUPT_RECEIVE_DATA);
                        membar_Sync();
                }

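                /* Wait for the dispatch BUSY bit to clear. */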
                for (ik = 0; ik < 1000000; ik++) {
                        if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
                                continue;
                        else
                                break;
                }
                intr_restore(s);

                if (ik == 1000000)
                        break;

                if ((ldxa(0, ASI_IDSR) & (IDSR_NACK << shift)) == 0)
                        return;
                /*
                 * Wait for a while with interrupts enabled to avoid
                 * deadlocks.  XXX - a random backoff would be better.
                 */
                DELAY(1);
        }

        if (panicstr == NULL)
                panic("cpu%d: ipi_send: couldn't send ipi to UPAID %u"
                    " (tried %d times)", cpu_number(), upaid, i);
}

/*
 * Send an interprocessor interrupt - sun4v.
 */
void
sparc64_send_ipi_sun4v(int cpuid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
        struct cpu_info *ci = curcpu();
        int err, i;

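        /*
         * Build the mondo: the cpu list holds the single target cpuid, and
         * the mondo data is the handler address followed by its two
         * arguments.
         */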
        stha(ci->ci_cpuset, ASI_PHYS_CACHED, cpuid);
        stxa(ci->ci_mondo, ASI_PHYS_CACHED, (vaddr_t)func);
        stxa(ci->ci_mondo + 8, ASI_PHYS_CACHED, arg1);
        stxa(ci->ci_mondo + 16, ASI_PHYS_CACHED, arg2);

        for (i = 0; i < SPARC64_IPI_RETRIES; i++) {
                err = hv_cpu_mondo_send(1, ci->ci_cpuset, ci->ci_mondo);
                if (err != H_EWOULDBLOCK)
                        break;
                delay(10);
        }
        if (err != H_EOK)
                panic("Unable to send mondo %lx to cpu %d: %d",
                    (long unsigned int)func, cpuid, err);
}

/*
 * Wait for IPI operation to complete.
 * Return 0 on success.
 */
int
sparc64_ipi_wait(sparc64_cpuset_t volatile *cpus_watchset, sparc64_cpuset_t cpus_mask)
{
        uint64_t limit = gettick() + cpu_frequency(curcpu());
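        /* Spin for at most about one second's worth of cpu clock ticks. */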

        while (gettick() < limit) {
                membar_Sync();
                if (CPUSET_EQUAL(*cpus_watchset, cpus_mask))
                        return 0;
        }
        return 1;
}

/*
 * Halt all cpus but ourselves.
 */
void
mp_halt_cpus(void)
{
        sparc64_cpuset_t cpumask, cpuset;
        struct cpu_info *ci;

        CPUSET_ASSIGN(cpuset, cpus_active);
        CPUSET_DEL(cpuset, cpu_number());
        CPUSET_ASSIGN(cpumask, cpuset);
        CPUSET_SUB(cpuset, cpus_halted);
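        /* cpumask: all other active cpus; cpuset: those not yet halted. */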

        if (CPUSET_EMPTY(cpuset))
                return;

        CPUSET_CLEAR(cpus_spinning);
        sparc64_multicast_ipi(cpuset, sparc64_ipi_halt, 0, 0);
        if (sparc64_ipi_wait(&cpus_halted, cpumask))
                sparc64_ipi_error("halt", cpumask, cpus_halted);

        /*
         * Depending on available firmware methods, other cpus will
         * either shut down themselves, or spin and wait for us to
         * stop them.
         */
        if (CPUSET_EMPTY(cpus_spinning)) {
                /* give other cpus a few cycles to actually power down */
                delay(10000);
                return;
        }
        /* there are cpus spinning - shut them down if we can */
        if (prom_has_stop_other()) {
                for (ci = cpus; ci != NULL; ci = ci->ci_next) {
                        if (!CPUSET_HAS(cpus_spinning, ci->ci_index))
                                continue;
                        prom_stop_other(ci->ci_cpuid);
                }
        }
}


/*
 * Pause all cpus but ourselves.
 */
void
mp_pause_cpus(void)
{
        int i = 3;
        sparc64_cpuset_t cpuset;

        CPUSET_ASSIGN(cpuset, cpus_active);
        CPUSET_DEL(cpuset, cpu_number());
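        /* Try a few times; cpus drop out of the set as they report paused. */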
        while (i-- > 0) {
                if (CPUSET_EMPTY(cpuset))
                        return;

                sparc64_multicast_ipi(cpuset, sparc64_ipi_pause, 0, 0);
                if (!sparc64_ipi_wait(&cpus_paused, cpuset))
                        return;
                CPUSET_SUB(cpuset, cpus_paused);
        }
        sparc64_ipi_error("pause", cpus_paused, cpuset);
}

/*
 * Resume a single cpu
 */
void
mp_resume_cpu(int cno)
{
        CPUSET_DEL(cpus_paused, cno);
        membar_Sync();
}

/*
 * Resume all paused cpus.
 */
void
mp_resume_cpus(void)
{
        int i = 3;
        sparc64_cpuset_t cpuset;

        CPUSET_CLEAR(cpuset);   /* XXX: gcc -Wuninitialized */

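        /* Retry a few times in case some cpus are slow to check in. */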
        while (i-- > 0) {
                CPUSET_CLEAR(cpus_resumed);
                CPUSET_ASSIGN(cpuset, cpus_paused);
                membar_Sync();
                CPUSET_CLEAR(cpus_paused);

                /* CPUs wake up when they see cpus_paused cleared */
                if (!sparc64_ipi_wait(&cpus_resumed, cpuset))
                        return;
        }
        sparc64_ipi_error("resume", cpus_resumed, cpuset);
}

int
mp_cpu_is_paused(sparc64_cpuset_t cpunum)
{

        return CPUSET_HAS(cpus_paused, cpunum);
}

/*
 * Flush pte on all active processors.
 */
void
smp_tlb_flush_pte(vaddr_t va, struct pmap *pm)
{
        sparc64_cpuset_t cpuset;
        struct cpu_info *ci;
        int ctx;
        bool kpm = (pm == pmap_kernel());

        /* Flush our own TLB */
        ctx = pm->pm_ctx[cpu_number()];
        KASSERT(ctx >= 0);
        if (kpm || ctx > 0)
                sp_tlb_flush_pte(va, ctx);

        CPUSET_ASSIGN(cpuset, cpus_active);
        CPUSET_DEL(cpuset, cpu_number());
        if (CPUSET_EMPTY(cpuset))
                return;

        /* Flush others */
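        /*
         * A user pmap with no context (ctx == 0) on a cpu has nothing there
         * to shoot down; the kernel pmap is always flushed.
         */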
        for (ci = cpus; ci != NULL; ci = ci->ci_next) {
                if (CPUSET_HAS(cpuset, ci->ci_index)) {
                        CPUSET_DEL(cpuset, ci->ci_index);
                        ctx = pm->pm_ctx[ci->ci_index];
                        KASSERT(ctx >= 0);
                        if (!kpm && ctx == 0)
                                continue;
                        sparc64_send_ipi(ci->ci_cpuid, smp_tlb_flush_pte_func,
                            va, ctx);
                }
        }
}

/*
 * Make sure this page is flushed from all/some CPUs.
 */
void
smp_dcache_flush_page_cpuset(paddr_t pa, sparc64_cpuset_t activecpus)
{
        sparc64_multicast_ipi(activecpus, sparc64_ipi_dcache_flush_page_func,
            pa, dcache_line_size);
        sp_dcache_flush_page(pa);
}

void
smp_dcache_flush_page_allcpu(paddr_t pa)
{

        smp_dcache_flush_page_cpuset(pa, cpus_active);
}

/*
 * Flush the D$ on all CPUs.
 */
void
smp_blast_dcache(void)
{

        sparc64_multicast_ipi(cpus_active, sparc64_ipi_blast_dcache,
            dcache_size, dcache_line_size);
        sp_blast_dcache(dcache_size, dcache_line_size);
}

/*
 * Print an error message naming the cpus that failed to respond.
 */
void
sparc64_ipi_error(const char *s, sparc64_cpuset_t cpus_succeeded,
    sparc64_cpuset_t cpus_expected)
{
        int cpuid;

        CPUSET_DEL(cpus_expected, cpus_succeeded);
        if (!CPUSET_EMPTY(cpus_expected)) {
                printf("Failed to %s:", s);
                do {
                        cpuid = CPUSET_NEXT(cpus_expected);
                        CPUSET_DEL(cpus_expected, cpuid);
                        printf(" cpu%d", cpuid);
                } while (!CPUSET_EMPTY(cpus_expected));
        }

        printf("\n");
}

/*
 * MD support for xcall(9) interface.
 */

void
sparc64_generic_xcall(struct cpu_info *target, ipi_c_call_func_t func,
    void *arg)
{
        /* If target == NULL, broadcast to everything but curcpu */
        if (target)
                sparc64_send_ipi(target->ci_cpuid, sparc64_ipi_ccall,
                    (uint64_t)(uintptr_t)func, (uint64_t)(uintptr_t)arg);
        else
                sparc64_multicast_ipi(cpus_active, sparc64_ipi_ccall,
                    (uint64_t)(uintptr_t)func, (uint64_t)(uintptr_t)arg);
}

void
xc_send_ipi(struct cpu_info *target)
{

        sparc64_generic_xcall(target, (ipi_c_call_func_t)xc_ipi_handler, NULL);
}

void
cpu_ipi(struct cpu_info *target)
{

        sparc64_generic_xcall(target, (ipi_c_call_func_t)ipi_cpu_handler, NULL);
}
