xref: /netbsd-src/sys/arch/riscv/riscv/ipifuncs.c (revision dd75ac5b443e967e26b4d18cc8cd5eb98512bfbf)
1 /*	$NetBSD: ipifuncs.c,v 1.1 2023/06/12 19:04:14 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 2010 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Matt Thomas of 3am Software Foundry.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.1 2023/06/12 19:04:14 skrll Exp $");
34 
35 #include <sys/param.h>
36 
37 #include <sys/cpu.h>
38 #include <sys/device.h>
39 #include <sys/intr.h>
40 #include <sys/ipi.h>
41 #include <sys/xcall.h>
42 
43 #include <uvm/uvm_extern.h>
44 #include <uvm/pmap/pmap_synci.h>
45 #include <uvm/pmap/pmap_tlb.h>
46 
47 static void ipi_halt(void) __dead;
48 
/*
 * Human-readable names for each IPI type, indexed by the IPI_* constants.
 * Used as the per-IPI event-counter group names attached in ipi_init().
 * Every slot up to NIPIS must be populated (asserted in ipi_init()).
 */
static const char * const ipi_names[] = {
	[IPI_NOP]	= "ipi nop",
	[IPI_AST]	= "ipi ast",
	[IPI_SHOOTDOWN]	= "ipi shootdown",
	[IPI_SYNCICACHE]= "ipi isync",
	[IPI_KPREEMPT]	= "ipi kpreempt",
	[IPI_SUSPEND]	= "ipi suspend",
	[IPI_HALT]	= "ipi halt",
	[IPI_XCALL]	= "ipi xcall",
	[IPI_GENERIC]	= "ipi generic",
};
60 
/*
 * Handle IPI_NOP: deliberately do nothing.
 */
static void
ipi_nop(struct cpu_info *ci)
{
	/*
	 * This is just a reason to get an interrupt so we get
	 * kicked out of cpu_idle().
	 */
}
69 
70 static void
71 ipi_ast(struct cpu_info *ci)
72 {
73 	ci->ci_onproc->l_md.md_astpending = 1;
74 }
75 
/*
 * Handle IPI_SHOOTDOWN: process any TLB shootdown requests queued
 * for this cpu by the common pmap TLB code.
 */
static void
ipi_shootdown(struct cpu_info *ci)
{
	pmap_tlb_shootdown_process();
}
81 
/*
 * Handle IPI_SYNCICACHE: note that an icache synchronization has been
 * requested for this cpu; the actual sync happens via the pmap code.
 */
static inline void
ipi_syncicache(struct cpu_info *ci)
{
	pmap_tlb_syncicache_wanted(ci);
}
87 
#ifdef __HAVE_PREEMPTION
/*
 * Handle IPI_KPREEMPT: request a kernel preemption on this cpu by
 * raising the kpreempt soft interrupt.  Only built when the port
 * supports in-kernel preemption.
 */
static inline void
ipi_kpreempt(struct cpu_info *ci)
{
	softint_trigger(SOFTINT_KPREEMPT);
}
#endif
95 
96 /*
97  * Process cpu stop-self event.
98  * XXX could maybe add/use locoresw halt function?
99  */
100 static void
101 ipi_halt(void)
102 {
103 	const u_int my_cpu = cpu_number();
104 	printf("cpu%u: shutting down\n", my_cpu);
105 	kcpuset_set(cpus_halted, my_cpu);
106 	splhigh();
107 	for (;;)
108 		;
109 	/* NOTREACHED */
110 }
111 
/*
 * Dispatch all IPIs indicated by ipi_mask to their handlers, bumping
 * the matching per-IPI event counter for each one delivered.  Must be
 * called from interrupt context (asserted below).
 *
 * NOTE(review): ipi_halt() never returns, so when IPI_HALT is set any
 * IPI_XCALL/IPI_GENERIC/IPI_KPREEMPT bits also present in ipi_mask are
 * never processed — presumably acceptable since the cpu is going away;
 * confirm that no sender relies on those being handled first.
 */
void
ipi_process(struct cpu_info *ci, unsigned long ipi_mask)
{
	KASSERT(cpu_intr_p());

	if (ipi_mask & __BIT(IPI_NOP)) {
		ci->ci_evcnt_per_ipi[IPI_NOP].ev_count++;
		ipi_nop(ci);
	}
	if (ipi_mask & __BIT(IPI_AST)) {
		ci->ci_evcnt_per_ipi[IPI_AST].ev_count++;
		ipi_ast(ci);
	}
	if (ipi_mask & __BIT(IPI_SHOOTDOWN)) {
		ci->ci_evcnt_per_ipi[IPI_SHOOTDOWN].ev_count++;
		ipi_shootdown(ci);
	}
	if (ipi_mask & __BIT(IPI_SYNCICACHE)) {
		ci->ci_evcnt_per_ipi[IPI_SYNCICACHE].ev_count++;
		ipi_syncicache(ci);
	}
	if (ipi_mask & __BIT(IPI_SUSPEND)) {
		ci->ci_evcnt_per_ipi[IPI_SUSPEND].ev_count++;
		cpu_pause();
	}
	if (ipi_mask & __BIT(IPI_HALT)) {
		ci->ci_evcnt_per_ipi[IPI_HALT].ev_count++;
		/* Does not return. */
		ipi_halt();
	}
	if (ipi_mask & __BIT(IPI_XCALL)) {
		ci->ci_evcnt_per_ipi[IPI_XCALL].ev_count++;
		xc_ipi_handler();
	}
	if (ipi_mask & __BIT(IPI_GENERIC)) {
		ci->ci_evcnt_per_ipi[IPI_GENERIC].ev_count++;
		ipi_cpu_handler();
	}
#ifdef __HAVE_PREEMPTION
	if (ipi_mask & __BIT(IPI_KPREEMPT)) {
		ci->ci_evcnt_per_ipi[IPI_KPREEMPT].ev_count++;
		ipi_kpreempt(ci);
	}
#endif
}
156 
157 void
158 ipi_init(struct cpu_info *ci)
159 {
160 	evcnt_attach_dynamic(&ci->ci_evcnt_all_ipis, EVCNT_TYPE_INTR,
161 	    NULL, device_xname(ci->ci_dev), "ipi");
162 
163 	for (size_t i = 0; i < NIPIS; i++) {
164 		KASSERTMSG(ipi_names[i] != NULL, "%zu", i);
165 		evcnt_attach_dynamic(&ci->ci_evcnt_per_ipi[i], EVCNT_TYPE_INTR,
166 		    NULL, device_xname(ci->ci_dev), ipi_names[i]);
167 	}
168 }
169