/*	$NetBSD: kern_idle.c,v 1.34 2020/09/05 16:30:12 riastradh Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.34 2020/09/05 16:30:12 riastradh Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */
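/*
 * idle_loop:
 *
 *	Body of each CPU's idle LWP.  Loops forever at the lowest
 *	priority: performs idle-time housekeeping, halts the CPU when
 *	there is nothing to do, and yields via mi_switch() as soon as
 *	the scheduler has runnable work for this CPU.
 */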
void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
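	/* Mark this CPU in the global set of running CPUs. */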
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
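	/* Drop any kernel_lock holds; the idle LWP runs without it. */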
	KERNEL_UNLOCK_ALL(l, NULL);

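	/*
	 * The idle loop proper: give the scheduler and UVM a chance to
	 * use the idle cycles, halt until work arrives, and switch away
	 * as soon as this CPU has something runnable.
	 */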
	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

		sched_idle();
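		/*
		 * Nothing is runnable yet: let UVM use the idle time
		 * (unless this CPU is offline), then halt in cpu_idle()
		 * until an interrupt or a reschedule request arrives.
		 */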
		if (!sched_curcpu_runnable_p()) {
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
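		/* Work is available or a resched is pending: switch away. */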
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

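/*
 * create_idle_lwp:
 *
 *	Create the idle LWP for a CPU and hook it up.  Called once for
 *	each CPU as it is attached; panics on failure, since a CPU
 *	cannot operate without its idle LWP.
 */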
int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
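	/* KTHREAD_IDLE: create the LWP but do not make it runnable. */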
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}