/*	$NetBSD: kern_cpu.c,v 1.15 2007/12/05 07:06:52 ad Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.15 2007/12/05 07:06:52 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

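/*
 * cpu_lock serializes CPU state changes: it is taken by the cpuctl
 * ioctl handler below and asserted held in cpu_setonline().  ncpu
 * counts attached CPUs; ncpuonline counts those currently online.
 */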
kmutex_t cpu_lock;
int	ncpu;
int	ncpuonline;

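/*
 * mi_cpu_attach: attach a CPU to the machine-independent kernel.
 * Sets up per-CPU scheduler, UVM, soft interrupt, cross-call and pool
 * cache state, creates the CPU's idle LWP and bumps the global CPU
 * counts.
 */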
int
mi_cpu_attach(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	int error;

	ci->ci_index = ncpu;

	mutex_init(&spc->spc_lwplock, MUTEX_DEFAULT, IPL_SCHED);
	sched_cpuattach(ci);
	uvm_cpu_attach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	softint_init(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

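/*
 * cpuctlattach: pseudo-device attach routine for the cpuctl device.
 * Nothing to do here at present.
 */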
void
cpuctlattach(int dummy)
{

}

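/*
 * cpuctl_ioctl: handle ioctl requests on the cpuctl device.  Supports
 * setting and querying the online state of a CPU, mapping a CPU index
 * to its id, and returning the CPU count, all under cpu_lock.
 *
 * Illustrative userland usage (a sketch only, assuming the control
 * device node is /dev/cpuctl):
 *
 *	cpustate_t cs;
 *	int fd = open("/dev/cpuctl", O_RDONLY);
 *	memset(&cs, 0, sizeof(cs));
 *	cs.cs_id = 0;
 *	if (fd != -1 && ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0)
 *		printf("cpu0 %s\n", cs.cs_online ? "online" : "offline");
 */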
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		error = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, NULL);
		if (error != 0)
			break;
		cs = data;
		if ((ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		if (!cs->cs_intr) {
			error = EOPNOTSUPP;
			break;
		}
		error = cpu_setonline(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if ((ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		cs->cs_intr = true;
		cs->cs_lastmod = ci->ci_schedstate.spc_lastmod;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = ci->ci_cpuid;
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

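/*
 * cpu_lookup: find the CPU info structure matching the given id, or
 * return NULL if no such CPU is attached.
 */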
struct cpu_info *
cpu_lookup(cpuid_t id)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		if (ci->ci_cpuid == id)
			return ci;
	}

	return NULL;
}

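/*
 * cpu_xc_offline: take a CPU offline.  Runs as a cross-call on the
 * target CPU: marks it SPCF_OFFLINE and migrates non-bound LWPs to
 * the first CPU that is still online.
 */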
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *mci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration */
	for (CPU_INFO_FOREACH(cii, mci)) {
		mspc = &mci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(mci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not
	 * needed.
	 */
	mutex_enter(&proclist_lock);

	/*
	 * Note that threads on the runqueue might sleep after this, but
	 * sched_takecpu() will migrate such threads to the appropriate CPU.
	 */
	LIST_FOREACH(l, &alllwp, l_list) {
		lwp_lock(l);
		if (l->l_cpu == ci && (l->l_stat == LSSLEEP ||
		    l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED)) {
			KASSERT((l->l_flag & LW_RUNNING) == 0);
			l->l_cpu = mci;
		}
		lwp_unlock(l);
	}

	/*
	 * If the runqueue mutex pointers match, the runqueues share one
	 * lock, so take it only once.  Otherwise, double-lock the
	 * runqueues in address order.
	 */
	if (spc->spc_mutex == mspc->spc_mutex) {
		spc_lock(ci);
	} else if (ci < mci) {
		spc_lock(ci);
		spc_lock(mci);
	} else {
		spc_lock(mci);
		spc_lock(ci);
	}

	/* Handle LSRUN and LSIDL cases */
	LIST_FOREACH(l, &alllwp, l_list) {
		if (l->l_cpu != ci || (l->l_flag & LW_BOUND))
			continue;
		if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
			sched_dequeue(l);
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
			sched_enqueue(l, false);
		} else if (l->l_stat == LSRUN || l->l_stat == LSIDL) {
			l->l_cpu = mci;
			lwp_setlock(l, mspc->spc_mutex);
		}
	}
	if (spc->spc_mutex == mspc->spc_mutex) {
		spc_unlock(ci);
	} else {
		spc_unlock(ci);
		spc_unlock(mci);
	}

	mutex_exit(&proclist_lock);
}

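/*
 * cpu_xc_online: bring a CPU back online.  Runs as a cross-call on the
 * target CPU and simply clears the SPCF_OFFLINE flag.
 */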
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

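/*
 * cpu_setonline: set the online status of a CPU.  Must be called with
 * cpu_lock held.  Refuses to offline the last online CPU (EBUSY); the
 * actual state change is performed by a cross-call to the target CPU.
 */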
int
cpu_setonline(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			nonline += ((ci2->ci_schedstate.spc_flags &
			    SPCF_OFFLINE) == 0);
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else {
		KASSERT(spc->spc_flags & SPCF_OFFLINE);
	}
	spc->spc_lastmod = time_second;

	return 0;
}