/*	$NetBSD: kern_cpu.c,v 1.59 2012/10/17 20:19:55 drochner Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.59 2012/10/17 20:19:55 drochner Exp $");

#include "opt_cpu_ucode.h"
#include "opt_compat_netbsd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>

#include <uvm/uvm_extern.h>

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This prevents the two structures from
 * getting out of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif
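
/*
 * Hypothetical illustration: with __HAVE_CPU_DATA_FIRST in effect, the
 * assertion above guarantees that a struct cpu_data pointer (here "cd")
 * and its containing cpu_info share an address, so a cast such as
 *
 *	struct cpu_info *ci = (struct cpu_info *)cd;
 *
 * would be safe on such ports.
 */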

void	cpuctlattach(int);

static void	cpu_xc_online(struct cpu_info *);
static void	cpu_xc_offline(struct cpu_info *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	nullopen, nullclose, nullread, nullwrite, cpuctl_ioctl,
	nullstop, notty, nopoll, nommap, nokqfilter,
	D_OTHER | D_MPSAFE
};

kmutex_t	cpu_lock		__cacheline_aligned;
int		ncpu			__read_mostly;
int		ncpuonline		__read_mostly;
bool		mp_online		__read_mostly;

/* Note: set in mi_cpu_attach() and idle_loop(). */
kcpuset_t *	kcpuset_attached	__read_mostly	= NULL;
kcpuset_t *	kcpuset_running		__read_mostly	= NULL;

struct cpuqueue	cpu_queue		__cacheline_aligned
    = CIRCLEQ_HEAD_INITIALIZER(cpu_queue);

static struct cpu_info **cpu_infos	__read_mostly;

/*
 * mi_cpu_init: early initialisation of MI CPU related structures.
 *
 * Note: may not block; the memory allocator is not yet available.
 */
void
mi_cpu_init(void)
{

	mutex_init(&cpu_lock, MUTEX_DEFAULT, IPL_NONE);

	kcpuset_create(&kcpuset_attached, true);
	kcpuset_create(&kcpuset_running, true);
	kcpuset_set(kcpuset_running, 0);
}

int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	ci->ci_index = ncpu;
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_data.cpu_kcpuset, true);
	kcpuset_set(ci->ci_data.cpu_kcpuset, cpu_index(ci));

	CIRCLEQ_INSERT_TAIL(&cpu_queue, ci, ci_data.cpu_qchain);
	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		cpu_infos =
		    kmem_zalloc(sizeof(cpu_infos[0]) * maxcpus, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_data.cpu_onproc = curlwp;
	else
		ci->ci_data.cpu_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

void
cpuctlattach(int dummy)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

#ifdef COMPAT_60
	case OIOC_CPU_UCODE_GET_VERSION:
		error = compat6_cpu_ucode_get_version((struct compat6_cpu_ucode *)data);
		break;
#endif

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;

#ifdef COMPAT_60
	case OIOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = compat6_cpu_ucode_apply((const struct compat6_cpu_ucode *)data);
		break;
#endif
#endif

	default:
		error = ENOTTY;
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}
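
/*
 * Usage sketch: userland reaches cpuctl_ioctl() through the cpuctl
 * character device.  A minimal, hypothetical query of CPU 0's state
 * could look like the fragment below; the device path and the lack of
 * error handling are assumptions for illustration only.
 *
 *	#include <sys/cpuio.h>
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *
 *	cpustate_t cs = { .cs_id = 0 };
 *	int fd = open("/dev/cpuctl", O_RDONLY);
 *	if (fd != -1 && ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0)
 *		printf("cpu%u: online=%d intr=%d\n", cs.cs_id,
 *		    cs.cs_online, cs.cs_intr);
 */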

struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	KASSERT(idx < maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);

	return ci;
}

static void
cpu_xc_offline(struct cpu_info *ci)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * The thread that made the cross call (a separate context)
	 * holds cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set, find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(proc_lock);

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag; the caller will check. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
		ncpuonline--;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
	} else if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
		/* If it was not set offline, then it is busy. */
		return EBUSY;
	}

	spc->spc_lastmod = time_second;
	return 0;
}
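
/*
 * Call-pattern sketch: cpu_setstate() asserts that cpu_lock is held, so
 * a hypothetical in-kernel caller taking a CPU offline would follow the
 * same discipline as cpuctl_ioctl() above (EBUSY is returned if "ci" is
 * the last online CPU in its processor set):
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);
 *	mutex_exit(&cpu_lock);
 */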

#ifdef __HAVE_INTR_CONTROL
static void
cpu_xc_intr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR was not set, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

bool
cpu_softintr_p(void)
{

	return (curlwp->l_pflag & LP_INTR) != 0;
}

#ifdef CPU_UCODE
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, 0);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
		aprint_error("ucode: firmware_open failed: %i\n", error);
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, 0);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
#endif