/*	$NetBSD: ipifuncs.c,v 1.14 2020/07/22 15:00:49 jmcneill Exp $	*/

/*-
 * Copyright (c) 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ipifuncs.c,v 1.14 2020/07/22 15:00:49 jmcneill Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/device.h>
#include <sys/intr.h>
#include <sys/xcall.h>
#include <sys/ipi.h>

#include <uvm/uvm_extern.h>

#include <mips/cache.h>
#include <mips/locore.h>
#ifdef DDB
#include <mips/db_machdep.h>
#endif

static void ipi_halt(void) __dead;

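/* Names for the per-IPI event counters attached in ipi_init(), indexed by IPI number. */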
static const char * const ipi_names[] = {
	[IPI_NOP]	= "ipi nop",
	[IPI_AST]	= "ipi ast",
	[IPI_SHOOTDOWN]	= "ipi shootdown",
	[IPI_SYNCICACHE]	= "ipi isync",
	[IPI_KPREEMPT]	= "ipi kpreempt",
	[IPI_SUSPEND]	= "ipi suspend",
	[IPI_HALT]	= "ipi halt",
	[IPI_XCALL]	= "ipi xcall",
	[IPI_GENERIC]	= "ipi generic",
	[IPI_WDOG]	= "ipi wdog",
};

static void
ipi_nop(struct cpu_info *ci)
{
	/*
	 * This is just a reason to get an interrupt so we get
	 * kicked out of cpu_idle().
	 */
}

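/* Post an AST to the lwp currently running on this CPU. */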
static void
ipi_ast(struct cpu_info *ci)
{
	ci->ci_onproc->l_md.md_astpending = 1;
}

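/* Service pending TLB shootdown requests on this CPU. */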
static void
ipi_shootdown(struct cpu_info *ci)
{
	pmap_tlb_shootdown_process();
}

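/* Handle an icache-sync request (IPI_SYNCICACHE) for this CPU. */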
static inline void
ipi_syncicache(struct cpu_info *ci)
{
	pmap_tlb_syncicache_wanted(ci);
}

#ifdef __HAVE_PREEMPTION
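/* Trigger the kernel-preemption soft interrupt on this CPU. */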
static inline void
ipi_kpreempt(struct cpu_info *ci)
{
	softint_trigger(SOFTINT_KPREEMPT);
}
#endif

/*
 * Process cpu stop-self event.
 * XXX could maybe add/use locoresw halt function?
 */
static void
ipi_halt(void)
{
	const u_int my_cpu = cpu_number();
	printf("cpu%u: shutting down\n", my_cpu);
	kcpuset_set(cpus_halted, my_cpu);
	splhigh();
	for (;;)
		;
	/* NOTREACHED */
}

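/*
 * Process the IPIs indicated by ipi_mask on this CPU, counting each
 * one handled in its per-IPI event counter.  Called at interrupt level.
 */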
void
ipi_process(struct cpu_info *ci, uint64_t ipi_mask)
{
	KASSERT(cpu_intr_p());

	if (ipi_mask & __BIT(IPI_NOP)) {
		ci->ci_evcnt_per_ipi[IPI_NOP].ev_count++;
		ipi_nop(ci);
	}
	if (ipi_mask & __BIT(IPI_AST)) {
		ci->ci_evcnt_per_ipi[IPI_AST].ev_count++;
		ipi_ast(ci);
	}
	if (ipi_mask & __BIT(IPI_SHOOTDOWN)) {
		ci->ci_evcnt_per_ipi[IPI_SHOOTDOWN].ev_count++;
		ipi_shootdown(ci);
	}
	if (ipi_mask & __BIT(IPI_SYNCICACHE)) {
		ci->ci_evcnt_per_ipi[IPI_SYNCICACHE].ev_count++;
		ipi_syncicache(ci);
	}
	if (ipi_mask & __BIT(IPI_SUSPEND)) {
		ci->ci_evcnt_per_ipi[IPI_SUSPEND].ev_count++;
		cpu_pause(NULL);
	}
	if (ipi_mask & __BIT(IPI_HALT)) {
		ci->ci_evcnt_per_ipi[IPI_HALT].ev_count++;
		ipi_halt();
	}
	if (ipi_mask & __BIT(IPI_XCALL)) {
		ci->ci_evcnt_per_ipi[IPI_XCALL].ev_count++;
		xc_ipi_handler();
	}
	if (ipi_mask & __BIT(IPI_GENERIC)) {
		ci->ci_evcnt_per_ipi[IPI_GENERIC].ev_count++;
		ipi_cpu_handler();
	}
#ifdef __HAVE_PREEMPTION
	if (ipi_mask & __BIT(IPI_KPREEMPT)) {
		ci->ci_evcnt_per_ipi[IPI_KPREEMPT].ev_count++;
		ipi_kpreempt(ci);
	}
#endif
}

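/*
 * Attach this CPU's IPI event counters: one aggregate "ipi" counter
 * plus one counter per IPI type.
 */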
void
ipi_init(struct cpu_info *ci)
{
	evcnt_attach_dynamic(&ci->ci_evcnt_all_ipis, EVCNT_TYPE_INTR,
	    NULL, device_xname(ci->ci_dev), "ipi");

	for (size_t i = 0; i < NIPIS; i++) {
		KASSERTMSG(ipi_names[i] != NULL, "%zu", i);
		evcnt_attach_dynamic(&ci->ci_evcnt_per_ipi[i], EVCNT_TYPE_INTR,
		    NULL, device_xname(ci->ci_dev), ipi_names[i]);
	}
}
175