/*	$NetBSD: kern_cpu.c,v 1.44 2010/04/25 15:57:59 ad Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.44 2010/04/25 15:57:59 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

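/*
 * Character device switch for the cpuctl control device (/dev/cpuctl).
 * Only ioctl is implemented; see cpuctl_ioctl() below.
 */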
const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

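/*
 * Global CPU state.  cpu_lock serializes changes to the set of attached
 * CPUs and to their online/interrupt state (see cpu_setstate() and
 * cpu_setintr()).
 */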
kmutex_t cpu_lock;
int	ncpu;
int	ncpuonline;
bool	mp_online;
struct	cpuqueue cpu_queue = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos;

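/*
 * mi_cpu_attach:
 *
 *	Machine-independent attachment of a CPU: allocate its index, create
 *	the idle LWP, and initialize per-CPU state for the MI subsystems
 *	(scheduler, soft interrupts, callouts, cross calls, pool caches,
 *	select and the name cache).
 */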
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-cpu evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	if (cpu_infos == NULL) {
		cpu_infos =
		    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	return 0;
}

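/*
 * cpuctlattach:
 *
 *	Pseudo-device attach routine for the cpuctl device.  By the time it
 *	runs, mi_cpu_attach() must already have allocated cpu_infos[].
 */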
void
cpuctlattach(int dummy)
{

	KASSERT(cpu_infos != NULL);
}

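/*
 * cpuctl_ioctl:
 *
 *	Handle control requests on the cpuctl device: set or query a CPU's
 *	online/interrupt state, map a sequential index to a CPU index, and
 *	report the CPU count.  All requests are serialized by cpu_lock.
 *
 *	Minimal userland sketch (illustrative only; error handling omitted;
 *	assumes the usual /dev/cpuctl node and <sys/cpuio.h>, <sys/ioctl.h>
 *	and <fcntl.h>):
 *
 *		cpustate_t cs = { .cs_id = 1 };
 *		int fd = open("/dev/cpuctl", O_RDWR);
 *		ioctl(fd, IOC_CPU_GETSTATE, &cs);	/ * query CPU 1 * /
 *		cs.cs_online = false;
 *		ioctl(fd, IOC_CPU_SETSTATE, &cs);	/ * take CPU 1 offline * /
 */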
int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		/*
		 * The interrupt setting is applied best-effort; only the
		 * result of the online/offline change is returned.
		 */
		(void)cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

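/*
 * cpu_lookup:
 *
 *	Translate a CPU index (as returned by cpu_index()) into a cpu_info
 *	pointer, or NULL if no such CPU has attached.
 */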
struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

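/*
 * cpu_xc_offline:
 *
 *	Cross-call handler that marks a CPU offline and migrates all
 *	migratable LWPs away from it.  Runs on the target CPU in the
 *	context of the xcall thread; the originator holds cpu_lock.
 */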
static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * Thread that made the cross call (separate context) holds
	 * cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Normal case - no affinity */
		if ((l->l_flag & LW_AFFINITY) == 0) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set */
		KASSERT(l->l_affinity != NULL);
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(cpu_index(mci), l->l_affinity))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag, caller will check */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

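/*
 * cpu_xc_online:
 *
 *	Cross-call handler that clears SPCF_OFFLINE so the scheduler will
 *	use the CPU again.
 */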
static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

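/*
 * cpu_setstate:
 *
 *	Take a CPU online or offline.  Called with cpu_lock held; refuses
 *	to offline the last online CPU in a processor set and returns
 *	EBUSY if the offline request could not be honoured.
 */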
int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* The CPU did not go offline (LWP migration failed): busy. */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

#ifdef __HAVE_INTR_CONTROL
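/*
 * cpu_xc_intr:
 *
 *	Cross-call handler that clears SPCF_NOINTR, allowing device
 *	interrupts to be routed to the CPU again.
 */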
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

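/*
 * cpu_xc_nointr:
 *
 *	Cross-call handler that sets SPCF_NOINTR, shielding the CPU from
 *	device interrupts.
 */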
static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

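/*
 * cpu_setintr:
 *
 *	Shield a CPU from, or re-expose it to, device interrupts.  Called
 *	with cpu_lock held; at least one CPU in the system must keep
 *	handling device interrupts, otherwise EBUSY is returned.
 */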
int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If the flag was not set, the CPU is still handling interrupts: busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

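/*
 * cpu_softintr_p:
 *
 *	Return true if the calling context is a software interrupt handler,
 *	i.e. the current LWP is an interrupt LWP.
 */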
bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}