/*	$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.36 2024/03/01 04:32:38 mrg Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */

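/*
 * idle_loop: the main loop of each CPU's idle LWP.  The idle LWP runs
 * whenever the scheduler finds nothing else runnable on its CPU.
 */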
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	KASSERT(l->l_blcnt == 0);

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
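	/* Mark this CPU as part of the global set of running CPUs. */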
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();

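	/*
	 * The idle loop proper: wait until there is work for this CPU,
	 * then hand the CPU over to the next runnable LWP.
	 */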
	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);
		KASSERTMSG(l->l_nopreempt == 0, "lwp %p nopreempt %d",
		    l, l->l_nopreempt);

		sched_idle();
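		/*
		 * If nothing is runnable, give the VM system and then
		 * the hardware a chance to idle.  Re-check for work
		 * after each step, since an interrupt may have made an
		 * LWP runnable in the meantime.
		 */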
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
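		/* There is work for this CPU: switch to the next LWP. */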
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

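/*
 * create_idle_lwp: create the idle LWP for the given CPU and record it
 * in the CPU's per-CPU data.
 */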
int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}