xref: /netbsd-src/sys/arch/x86/x86/ipi.c (revision 57eb66c673f44c443943833f001cb8aafea6809f)
1 /*	$NetBSD: ipi.c,v 1.30 2019/12/01 15:34:46 ad Exp $	*/
2 
3 /*-
4  * Copyright (c) 2000, 2008, 2009, 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by RedBack Networks Inc.
9  *
10  * Author: Bill Sommerfeld
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ipi.c,v 1.30 2019/12/01 15:34:46 ad Exp $");
36 
37 #include "opt_mtrr.h"
38 
39 #include <sys/param.h>
40 #include <sys/device.h>
41 #include <sys/systm.h>
42 #include <sys/atomic.h>
43 #include <sys/intr.h>
44 #include <sys/ipi.h>
45 #include <sys/cpu.h>
46 #include <sys/xcall.h>
47 
48 #ifdef MULTIPROCESSOR
49 
50 #include <machine/cpufunc.h>
51 #include <machine/cpuvar.h>
52 #include <machine/i82093var.h>
53 #include <machine/i82489reg.h>
54 #include <machine/i82489var.h>
55 #include <machine/mtrr.h>
56 #include <machine/gdt.h>
57 
58 #include "acpica.h"
59 
60 #include <x86/fpu.h>
61 
62 static void	x86_ipi_ast(struct cpu_info *);
63 static void	x86_ipi_halt(struct cpu_info *);
64 static void	x86_ipi_kpreempt(struct cpu_info *);
65 static void	x86_ipi_xcall(struct cpu_info *);
66 static void	x86_ipi_generic(struct cpu_info *);
67 
68 #ifdef MTRR
69 static void	x86_ipi_reload_mtrr(struct cpu_info *);
70 #else
71 #define		x86_ipi_reload_mtrr	NULL
72 #endif
73 
74 #if NACPICA > 0
75 void	acpi_cpu_sleep(struct cpu_info *);
76 #else
77 #define	acpi_cpu_sleep	NULL
78 #endif
79 
80 static void	x86_ipi_synch_fpu(struct cpu_info *);
81 
/*
 * IPI dispatch table, indexed by the bit number of the X86_IPI_*
 * request flag and invoked from x86_ipi_handler().  Entries compiled
 * out of this kernel (MTRR, ACPICA) and X86_IPI_GDT are NULL; such
 * IPIs must not be posted, as dispatch does not check for NULL.
 */
void (* const ipifunc[X86_NIPI])(struct cpu_info *) =
{
	x86_ipi_halt,		/* X86_IPI_HALT */
	x86_ipi_ast,		/* X86_IPI_AST */
	x86_ipi_generic,	/* X86_IPI_GENERIC */
	x86_ipi_synch_fpu,	/* X86_IPI_SYNCH_FPU */
	x86_ipi_reload_mtrr,	/* X86_IPI_MTRR */
	NULL,			/* X86_IPI_GDT */
	x86_ipi_xcall,		/* X86_IPI_XCALL */
	acpi_cpu_sleep,		/* X86_IPI_ACPI_CPU_SLEEP */
	x86_ipi_kpreempt	/* X86_IPI_KPREEMPT */
};
94 
95 /*
96  * x86 IPI interface.
97  */
98 
99 int
x86_send_ipi(struct cpu_info * ci,int ipimask)100 x86_send_ipi(struct cpu_info *ci, int ipimask)
101 {
102 	uint32_t o, n;
103 	int ret = 0;
104 
105 	/* Don't send IPI to CPU which isn't (yet) running. */
106 	if (__predict_false((ci->ci_flags & CPUF_RUNNING) == 0))
107 		return ENOENT;
108 
109 	/* Set in new IPI bit, and capture previous state. */
110 	for (o = 0;; o = n) {
111 		n = atomic_cas_32(&ci->ci_ipis, o, o | ipimask);
112 		if (__predict_true(o == n)) {
113 			break;
114 		}
115 	}
116 
117 	/* If no IPI already pending, send one. */
118 	if (o == 0) {
119 		ret = x86_ipi(LAPIC_IPI_VECTOR, ci->ci_cpuid, LAPIC_DLMODE_FIXED);
120 		if (ret != 0) {
121 			printf("ipi of %x from %s to %s failed\n",
122 			    ipimask,
123 			    device_xname(curcpu()->ci_dev),
124 			    device_xname(ci->ci_dev));
125 		}
126 	}
127 
128 	return ret;
129 }
130 
131 void
x86_broadcast_ipi(int ipimask)132 x86_broadcast_ipi(int ipimask)
133 {
134 	struct cpu_info *ci, *self = curcpu();
135 	int count = 0;
136 	CPU_INFO_ITERATOR cii;
137 
138 	for (CPU_INFO_FOREACH(cii, ci)) {
139 		if (ci == self)
140 			continue;
141 		if ((ci->ci_flags & CPUF_RUNNING) == 0)
142 			continue;
143 		atomic_or_32(&ci->ci_ipis, ipimask);
144 		count++;
145 	}
146 	if (!count)
147 		return;
148 
149 	x86_ipi(LAPIC_IPI_VECTOR, LAPIC_DEST_ALLEXCL, LAPIC_DLMODE_FIXED);
150 }
151 
152 void
x86_ipi_handler(void)153 x86_ipi_handler(void)
154 {
155 	struct cpu_info *ci = curcpu();
156 	uint32_t pending;
157 	int bit;
158 
159 	pending = atomic_swap_32(&ci->ci_ipis, 0);
160 
161 	KDASSERT((pending >> X86_NIPI) == 0);
162 	while ((bit = ffs(pending)) != 0) {
163 		bit--;
164 		pending &= ~(1 << bit);
165 		ci->ci_ipi_events[bit].ev_count++;
166 		(*ipifunc[bit])(ci);
167 	}
168 }
169 
170 /*
171  * Common x86 IPI handlers.
172  */
173 
/*
 * X86_IPI_HALT: take this CPU offline.  Interrupts are disabled and
 * CPUF_RUNNING is cleared (so no further IPIs will be posted to us by
 * x86_send_ipi()/x86_broadcast_ipi()), then the CPU spins in hlt
 * forever.  This function never returns.
 */
static void
x86_ipi_halt(struct cpu_info *ci)
{

	x86_disable_intr();
	atomic_and_32(&ci->ci_flags, ~CPUF_RUNNING);

	for (;;) {
		x86_hlt();
	}
}
185 
/*
 * X86_IPI_SYNCH_FPU: this request is never expected to be delivered;
 * receiving it indicates a logic error, so panic outright.
 */
static void
x86_ipi_synch_fpu(struct cpu_info *ci)
{

	panic("%s: impossible", __func__);
}
192 
#ifdef MTRR
/*
 * X86_IPI_MTRR: reload this CPU's MTRRs, if an MTRR backend has been
 * configured.
 */
static void
x86_ipi_reload_mtrr(struct cpu_info *ci)
{

	if (mtrr_funcs == NULL)
		return;

	/*
	 * mtrr_reload_cpu() is a macro in mtrr.h which picks
	 * the appropriate function to use.
	 */
	mtrr_reload_cpu(ci);
}
#endif
207 
/*
 * X86_IPI_KPREEMPT: request a kernel preemption on this CPU by
 * triggering the preemption soft interrupt.
 */
static void
x86_ipi_kpreempt(struct cpu_info *ci)
{

	softint_trigger(1 << SIR_PREEMPT);
}
214 
/*
 * X86_IPI_AST: post an asynchronous software trap to the LWP
 * currently running on this CPU.
 */
static void
x86_ipi_ast(struct cpu_info *ci)
{

	aston(ci->ci_onproc);
}
221 
222 /*
223  * MD support for xcall(9) interface.
224  */
225 
/*
 * X86_IPI_XCALL: hand off to the MI xcall(9) handler.
 */
static void
x86_ipi_xcall(struct cpu_info *ci)
{
	xc_ipi_handler();
}
231 
/*
 * X86_IPI_GENERIC: hand off to the MI ipi(9) handler.
 */
static void
x86_ipi_generic(struct cpu_info *ci)
{
	ipi_cpu_handler();
}
237 
238 void
xc_send_ipi(struct cpu_info * ci)239 xc_send_ipi(struct cpu_info *ci)
240 {
241 
242 	KASSERT(kpreempt_disabled());
243 	KASSERT(curcpu() != ci);
244 
245 	if (ci) {
246 		/* Unicast: remote CPU. */
247 		x86_send_ipi(ci, X86_IPI_XCALL);
248 	} else {
249 		/* Broadcast: all, but local CPU (caller will handle it). */
250 		x86_broadcast_ipi(X86_IPI_XCALL);
251 	}
252 }
253 
254 void
cpu_ipi(struct cpu_info * ci)255 cpu_ipi(struct cpu_info *ci)
256 {
257 	KASSERT(kpreempt_disabled());
258 	KASSERT(curcpu() != ci);
259 
260 	if (ci) {
261 		/* Unicast: remote CPU. */
262 		x86_send_ipi(ci, X86_IPI_GENERIC);
263 	} else {
264 		/* Broadcast: all, but local CPU (caller will handle it). */
265 		x86_broadcast_ipi(X86_IPI_GENERIC);
266 	}
267 }
268 
269 #else
270 
/*
 * Uniprocessor stub (no MULTIPROCESSOR): there is no other CPU to
 * signal, so sending an IPI trivially succeeds.
 */
int
x86_send_ipi(struct cpu_info *ci, int ipimask)
{

	return 0;
}
277 
/*
 * Uniprocessor stub (no MULTIPROCESSOR): nothing to broadcast to.
 */
void
x86_broadcast_ipi(int ipimask)
{

}
283 
/*
 * Uniprocessor stub (no MULTIPROCESSOR): ipi(9) requests are a no-op.
 */
void
cpu_ipi(struct cpu_info *ci)
{
}
288 
289 #endif
290