/*	$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $	*/

/*-
 * Copyright (c) 2007, 2008, 2009, 2010, 2012, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c)2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * CPU related routines not shared with rump.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_cpu.c,v 1.98 2025/01/17 04:11:33 mrg Exp $");

#ifdef _KERNEL_OPT
#include "opt_cpu_ucode.h"
#include "opt_heartbeat.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/idle.h>
#include <sys/sched.h>
#include <sys/intr.h>
#include <sys/conf.h>
#include <sys/cpu.h>
#include <sys/cpuio.h>
#include <sys/proc.h>
#include <sys/percpu.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/xcall.h>
#include <sys/pool.h>
#include <sys/kmem.h>
#include <sys/select.h>
#include <sys/namei.h>
#include <sys/callout.h>
#include <sys/pcu.h>
#include <sys/heartbeat.h>

#include <uvm/uvm_extern.h>

#include "ioconf.h"

/*
 * If the port has stated that cpu_data is the first thing in cpu_info,
 * verify that the claim is true.  This will prevent the two structures
 * from getting out of sync.
 */
#ifdef __HAVE_CPU_DATA_FIRST
CTASSERT(offsetof(struct cpu_info, ci_data) == 0);
#else
CTASSERT(offsetof(struct cpu_info, ci_data) != 0);
#endif

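/*
 * Illustrative sketch only (hypothetical port, not code in this file):
 * a port that defines __HAVE_CPU_DATA_FIRST promises a layout in which
 * the MI cpu_data is the leading member of its MD cpu_info, so that a
 * pointer to one may safely be treated as a pointer to the other:
 *
 *	struct cpu_info {
 *		struct cpu_data ci_data;	(must be first)
 *		...machine-dependent members...
 *	};
 */
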
int (*compat_cpuctl_ioctl)(struct lwp *, u_long, void *) = (void *)enosys;

static void	cpu_xc_online(struct cpu_info *, void *);
static void	cpu_xc_offline(struct cpu_info *, void *);

dev_type_ioctl(cpuctl_ioctl);

const struct cdevsw cpuctl_cdevsw = {
	.d_open = nullopen,
	.d_close = nullclose,
	.d_read = nullread,
	.d_write = nullwrite,
	.d_ioctl = cpuctl_ioctl,
	.d_stop = nullstop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_OTHER | D_MPSAFE
};

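/*
 * Userland sketch (assumed, not part of this file): cpuctl(8) reaches
 * the handlers below via ioctl(2) on /dev/cpuctl, roughly like this,
 * with cpustate_t and IOC_CPU_GETSTATE coming from <sys/cpuio.h>:
 *
 *	int fd = open("/dev/cpuctl", O_RDONLY);
 *	cpustate_t cs = { .cs_id = 0 };
 *	if (fd != -1 && ioctl(fd, IOC_CPU_GETSTATE, &cs) == 0)
 *		printf("cpu%u %s\n", cs.cs_id,
 *		    cs.cs_online ? "online" : "offline");
 */
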
int
mi_cpu_attach(struct cpu_info *ci)
{
	int error;

	KASSERT(maxcpus > 0);

	if ((ci->ci_index = ncpu) >= maxcpus)
		panic("Too many CPUs.  Increase MAXCPUS?");
	kcpuset_set(kcpuset_attached, cpu_index(ci));

	/*
	 * Create a convenience cpuset of just ourselves.
	 */
	kcpuset_create(&ci->ci_kcpuset, true);
	kcpuset_set(ci->ci_kcpuset, cpu_index(ci));

	TAILQ_INIT(&ci->ci_data.cpu_ld_locks);
	__cpu_simple_lock_init(&ci->ci_data.cpu_ld_lock);

	/* This is useful for, e.g., per-CPU evcnt. */
	snprintf(ci->ci_data.cpu_name, sizeof(ci->ci_data.cpu_name), "cpu%d",
	    cpu_index(ci));

	if (__predict_false(cpu_infos == NULL)) {
		size_t ci_bufsize = (maxcpus + 1) * sizeof(struct cpu_info *);
		cpu_infos = kmem_zalloc(ci_bufsize, KM_SLEEP);
	}
	cpu_infos[cpu_index(ci)] = ci;

	sched_cpuattach(ci);

	error = create_idle_lwp(ci);
	if (error != 0) {
		/* XXX revert sched_cpuattach */
		return error;
	}

	if (ci == curcpu())
		ci->ci_onproc = curlwp;
	else
		ci->ci_onproc = ci->ci_data.cpu_idlelwp;

	percpu_init_cpu(ci);
	softint_init(ci);
	callout_init_cpu(ci);
	xc_init_cpu(ci);
	pool_cache_cpu_init(ci);
	selsysinit(ci);
	cache_cpu_init(ci);
	TAILQ_INIT(&ci->ci_data.cpu_biodone);
	ncpu++;
	ncpuonline++;

	return 0;
}

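/*
 * Sketch of the expected calling sequence (simplified; the helper names
 * here are hypothetical, and each port's cpu_attach() differs):
 *
 *	struct cpu_info *ci = md_alloc_cpu_info();	(MD allocation)
 *	ci->ci_cpuid = hwid;				(MD hardware id)
 *	if (mi_cpu_attach(ci) != 0)
 *		return;					(CPU not usable)
 */
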
void
cpuctlattach(int dummy __unused)
{

	KASSERT(cpu_infos != NULL);
}

int
cpuctl_ioctl(dev_t dev, u_long cmd, void *data, int flag, lwp_t *l)
{
	CPU_INFO_ITERATOR cii;
	cpustate_t *cs;
	struct cpu_info *ci;
	int error, i;
	u_int id;

	error = 0;

	mutex_enter(&cpu_lock);
	switch (cmd) {
	case IOC_CPU_SETSTATE:
		cs = data;
		error = kauth_authorize_system(l->l_cred,
		    KAUTH_SYSTEM_CPU, KAUTH_REQ_SYSTEM_CPU_SETSTATE, cs, NULL,
		    NULL);
		if (error != 0)
			break;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(cs->cs_id)) == NULL) {
			error = ESRCH;
			break;
		}
		cpu_setintr(ci, cs->cs_intr);	/* XXX neglect errors */
		error = cpu_setstate(ci, cs->cs_online);
		break;

	case IOC_CPU_GETSTATE:
		cs = data;
		id = cs->cs_id;
		memset(cs, 0, sizeof(*cs));
		cs->cs_id = id;
		if (cs->cs_id >= maxcpus ||
		    (ci = cpu_lookup(id)) == NULL) {
			error = ESRCH;
			break;
		}
		if ((ci->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
			cs->cs_online = false;
		else
			cs->cs_online = true;
		if ((ci->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
			cs->cs_intr = false;
		else
			cs->cs_intr = true;
		cs->cs_lastmod = (int32_t)ci->ci_schedstate.spc_lastmod;
		cs->cs_lastmodhi = (int32_t)
		    (ci->ci_schedstate.spc_lastmod >> 32);
		cs->cs_intrcnt = cpu_intr_count(ci) + 1;
		cs->cs_hwid = ci->ci_cpuid;
		break;

	case IOC_CPU_MAPID:
		i = 0;
		for (CPU_INFO_FOREACH(cii, ci)) {
			if (i++ == *(int *)data)
				break;
		}
		if (ci == NULL)
			error = ESRCH;
		else
			*(int *)data = cpu_index(ci);
		break;

	case IOC_CPU_GETCOUNT:
		*(int *)data = ncpu;
		break;

#ifdef CPU_UCODE
	case IOC_CPU_UCODE_GET_VERSION:
		error = cpu_ucode_get_version((struct cpu_ucode_version *)data);
		break;

	case IOC_CPU_UCODE_APPLY:
		error = kauth_authorize_machdep(l->l_cred,
		    KAUTH_MACHDEP_CPU_UCODE_APPLY,
		    NULL, NULL, NULL, NULL);
		if (error != 0)
			break;
		error = cpu_ucode_apply((const struct cpu_ucode *)data);
		break;
#endif

	default:
		error = (*compat_cpuctl_ioctl)(l, cmd, data);
		break;
	}
	mutex_exit(&cpu_lock);

	return error;
}

struct cpu_info *
cpu_lookup(u_int idx)
{
	struct cpu_info *ci;

	/*
	 * cpu_infos is a NULL terminated array of MAXCPUS + 1 entries,
	 * so an index of MAXCPUS here is ok.  See mi_cpu_attach.
	 */
	KASSERT(idx <= maxcpus);

	if (__predict_false(cpu_infos == NULL)) {
		KASSERT(idx == 0);
		return curcpu();
	}

	ci = cpu_infos[idx];
	KASSERT(ci == NULL || cpu_index(ci) == idx);
	KASSERTMSG(idx < maxcpus || ci == NULL, "idx %d ci %p", idx, ci);

	return ci;
}

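/*
 * Typical use, as in cpuctl_ioctl() above: translate a user-supplied
 * index into a cpu_info pointer under cpu_lock, treating NULL as
 * "no such CPU":
 *
 *	mutex_enter(&cpu_lock);
 *	if ((ci = cpu_lookup(idx)) == NULL)
 *		error = ESRCH;
 *	mutex_exit(&cpu_lock);
 */
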
static void
cpu_xc_offline(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc, *mspc = NULL;
	struct cpu_info *target_ci;
	struct lwp *l;
	CPU_INFO_ITERATOR cii;
	int s;

	/*
	 * The thread that made the cross call (in a separate context)
	 * holds cpu_lock on our behalf.
	 */
	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_OFFLINE;
	splx(s);

	/* Take the first available CPU for the migration. */
	for (CPU_INFO_FOREACH(cii, target_ci)) {
		mspc = &target_ci->ci_schedstate;
		if ((mspc->spc_flags & SPCF_OFFLINE) == 0)
			break;
	}
	KASSERT(target_ci != NULL);

	/*
	 * Migrate all non-bound threads to the other CPU.  Note that this
	 * runs from the xcall thread, thus handling of LSONPROC is not needed.
	 */
	mutex_enter(&proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		struct cpu_info *mci;

		lwp_lock(l);
		if (l->l_cpu != ci || (l->l_pflag & (LP_BOUND | LP_INTR))) {
			lwp_unlock(l);
			continue;
		}
		/* Regular case - no affinity. */
		if (l->l_affinity == NULL) {
			lwp_migrate(l, target_ci);
			continue;
		}
		/* Affinity is set; find an online CPU in the set. */
		for (CPU_INFO_FOREACH(cii, mci)) {
			mspc = &mci->ci_schedstate;
			if ((mspc->spc_flags & SPCF_OFFLINE) == 0 &&
			    kcpuset_isset(l->l_affinity, cpu_index(mci)))
				break;
		}
		if (mci == NULL) {
			lwp_unlock(l);
			mutex_exit(&proc_lock);
			goto fail;
		}
		lwp_migrate(l, mci);
	}
	mutex_exit(&proc_lock);

#if PCU_UNIT_COUNT > 0
	pcu_save_all_on_cpu();
#endif

	heartbeat_suspend();

#ifdef __HAVE_MD_CPU_OFFLINE
	cpu_offline_md();
#endif
	return;
fail:
	/* Just unset the SPCF_OFFLINE flag; the caller will check it. */
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

static void
cpu_xc_online(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	heartbeat_resume();

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_OFFLINE;
	splx(s);
}

int
cpu_setstate(struct cpu_info *ci, bool online)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nonline;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (online) {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_online;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) != 0)
			return 0;
		nonline = 0;
		/*
		 * Ensure that at least one CPU within the processor set
		 * stays online.  Revisit this later.
		 */
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_OFFLINE) != 0)
				continue;
			if (ci2->ci_schedstate.spc_psid != spc->spc_psid)
				continue;
			nonline++;
		}
		if (nonline == 1)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_offline;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (online) {
		KASSERT((spc->spc_flags & SPCF_OFFLINE) == 0);
		ncpuonline++;
	} else {
		if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
			/* If it was not set offline, then it is busy. */
			return EBUSY;
		}
		ncpuonline--;
	}

	spc->spc_lastmod = time_second;
	return 0;
}

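/*
 * Calling pattern, sketched (it mirrors cpuctl_ioctl() above): the
 * caller must hold cpu_lock, and EBUSY covers both "last online CPU
 * in the processor set" and "an affinity mask left an LWP with no
 * online CPU to migrate to":
 *
 *	mutex_enter(&cpu_lock);
 *	error = cpu_setstate(ci, false);	(take ci offline)
 *	mutex_exit(&cpu_lock);
 */
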
bool
cpu_is_type(struct cpu_info *ci, int wanted)
{

	return (ci->ci_schedstate.spc_flags & wanted) == wanted;
}

bool
cpu_is_idle_1stclass(struct cpu_info *ci)
{
	const int wanted = SPCF_IDLE | SPCF_1STCLASS;

	return cpu_is_type(ci, wanted);
}

bool
cpu_is_1stclass(struct cpu_info *ci)
{
	const int wanted = SPCF_1STCLASS;

	return cpu_is_type(ci, wanted);
}

bool
cpu_is_better(struct cpu_info *ci1, struct cpu_info *ci2)
{
	const int ci1_flags = ci1->ci_schedstate.spc_flags;
	const int ci2_flags = ci2->ci_schedstate.spc_flags;

	/* ci1 is better only if it is first-class and ci2 is not. */
	if ((ci1_flags & SPCF_1STCLASS) != 0 &&
	    (ci2_flags & SPCF_1STCLASS) == 0)
		return true;

	return false;
}

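/*
 * Usage sketch (hypothetical caller): pick the preferable of two
 * candidate CPUs, e.g. when choosing a migration target:
 *
 *	struct cpu_info *tci = cpu_is_better(ci1, ci2) ? ci1 : ci2;
 */
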
#if defined(__HAVE_INTR_CONTROL)
static void
cpu_xc_intr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags &= ~SPCF_NOINTR;
	splx(s);
}

static void
cpu_xc_nointr(struct cpu_info *ci, void *unused)
{
	struct schedstate_percpu *spc;
	int s;

	spc = &ci->ci_schedstate;
	s = splsched();
	spc->spc_flags |= SPCF_NOINTR;
	splx(s);
}

int
cpu_setintr(struct cpu_info *ci, bool intr)
{
	struct schedstate_percpu *spc;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci2;
	uint64_t where;
	xcfunc_t func;
	int nintr;

	spc = &ci->ci_schedstate;

	KASSERT(mutex_owned(&cpu_lock));

	if (intr) {
		if ((spc->spc_flags & SPCF_NOINTR) == 0)
			return 0;
		func = (xcfunc_t)cpu_xc_intr;
	} else {
		if (CPU_IS_PRIMARY(ci))	/* XXX kern/45117 */
			return EINVAL;
		if ((spc->spc_flags & SPCF_NOINTR) != 0)
			return 0;
		/*
		 * Ensure that at least one CPU within the system
		 * is handling device interrupts.
		 */
		nintr = 0;
		for (CPU_INFO_FOREACH(cii, ci2)) {
			if ((ci2->ci_schedstate.spc_flags & SPCF_NOINTR) != 0)
				continue;
			if (ci2 == ci)
				continue;
			nintr++;
		}
		if (nintr == 0)
			return EBUSY;
		func = (xcfunc_t)cpu_xc_nointr;
	}

	where = xc_unicast(0, func, ci, NULL, ci);
	xc_wait(where);
	if (intr) {
		KASSERT((spc->spc_flags & SPCF_NOINTR) == 0);
	} else if ((spc->spc_flags & SPCF_NOINTR) == 0) {
		/* If SPCF_NOINTR was not set, then the CPU is busy. */
		return EBUSY;
	}

	/* Direct interrupts away from the CPU and record the change. */
	cpu_intr_redistribute();
	spc->spc_lastmod = time_second;
	return 0;
}
#else	/* __HAVE_INTR_CONTROL */
int
cpu_setintr(struct cpu_info *ci, bool intr)
{

	return EOPNOTSUPP;
}

u_int
cpu_intr_count(struct cpu_info *ci)
{

	return 0;	/* 0 == "don't know" */
}
#endif	/* __HAVE_INTR_CONTROL */

#ifdef CPU_UCODE
int
cpu_ucode_load(struct cpu_ucode_softc *sc, const char *fwname)
{
	firmware_handle_t fwh;
	int error;

	if (sc->sc_blob != NULL) {
		firmware_free(sc->sc_blob, sc->sc_blobsize);
		sc->sc_blob = NULL;
		sc->sc_blobsize = 0;
	}

	error = cpu_ucode_md_open(&fwh, sc->loader_version, fwname);
	if (error != 0) {
#ifdef DEBUG
		printf("ucode: firmware_open(%s) failed: %i\n", fwname, error);
#endif
		goto err0;
	}

	sc->sc_blobsize = firmware_get_size(fwh);
	if (sc->sc_blobsize == 0) {
		error = EFTYPE;
		firmware_close(fwh);
		goto err0;
	}
	sc->sc_blob = firmware_malloc(sc->sc_blobsize);
	if (sc->sc_blob == NULL) {
		error = ENOMEM;
		firmware_close(fwh);
		goto err0;
	}

	error = firmware_read(fwh, 0, sc->sc_blob, sc->sc_blobsize);
	firmware_close(fwh);
	if (error != 0)
		goto err1;

	return 0;

err1:
	firmware_free(sc->sc_blob, sc->sc_blobsize);
	sc->sc_blob = NULL;
	sc->sc_blobsize = 0;
err0:
	return error;
}
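
/*
 * Hypothetical caller sketch (the firmware path name is made up):
 *
 *	struct cpu_ucode_softc sc = { .loader_version = ... };
 *	if (cpu_ucode_load(&sc, "cpu_ucode/example.bin") == 0) {
 *		(hand sc.sc_blob / sc.sc_blobsize to the MD apply code)
 *		firmware_free(sc.sc_blob, sc.sc_blobsize);
 *	}
 */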
#endif