/*	$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $	*/

/*-
 * Copyright (c)2002, 2006, 2007 YAMAMOTO Takashi,
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>

__KERNEL_RCSID(0, "$NetBSD: kern_idle.c,v 1.33 2020/03/26 19:42:39 ad Exp $");

#include <sys/param.h>
#include <sys/cpu.h>
#include <sys/idle.h>
#include <sys/kthread.h>
#include <sys/lockdebug.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>	/* uvm_idle */
#include <uvm/uvm_extern.h>

void
idle_loop(void *dummy)
{
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc;
	struct lwp *l = curlwp;

	lwp_lock(l);
	spc = &ci->ci_schedstate;
	KASSERT(lwp_locked(l, spc->spc_lwplock));
	kcpuset_atomic_set(kcpuset_running, cpu_index(ci));
	/* Update start time for this thread. */
	binuptime(&l->l_stime);
	spc->spc_flags |= SPCF_RUNNING;
	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	l->l_stat = LSIDL;
	lwp_unlock(l);

	/*
	 * Use spl0() here to ensure that we have the correct interrupt
	 * priority.  This may be the first thread running on the CPU,
	 * in which case we took an odd route to get here.
	 */
	spl0();
	KERNEL_UNLOCK_ALL(l, NULL);

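	/*
	 * The idle loop proper.  Look for runnable LWPs; while there are
	 * none, do low-priority housekeeping and sleep in cpu_idle(), and
	 * once there is work for this CPU hand it over via mi_switch().
	 */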
	for (;;) {
		LOCKDEBUG_BARRIER(NULL, 0);
		KASSERT((l->l_flag & LW_IDLE) != 0);
		KASSERT(ci == curcpu());
		KASSERT(l == curlwp);
		KASSERT(CURCPU_IDLE_P());
		KASSERT(l->l_priority == PRI_IDLE);

		sched_idle();
		if (!sched_curcpu_runnable_p()) {
			/* No work: housekeeping, unless CPU is offline. */
			if ((spc->spc_flags & SPCF_OFFLINE) == 0) {
				uvm_idle();
			}
			/* Re-check before committing to sleep. */
			if (!sched_curcpu_runnable_p()) {
				cpu_idle();
				/*
				 * Awake again: if there is still nothing
				 * runnable and no resched pending, stay
				 * in the idle loop.
				 */
				if (!sched_curcpu_runnable_p() &&
				    !ci->ci_want_resched) {
					continue;
				}
			}
		}
		/* There is work for this CPU: switch away. */
		KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock);
		lwp_lock(l);
		spc_lock(l->l_cpu);
		mi_switch(l);
		KASSERT(curlwp == l);
		KASSERT(l->l_stat == LSIDL);
	}
}

int
create_idle_lwp(struct cpu_info *ci)
{
	lwp_t *l;
	int error;

	KASSERT(ci->ci_data.cpu_idlelwp == NULL);
	error = kthread_create(PRI_IDLE, KTHREAD_MPSAFE | KTHREAD_IDLE,
	    ci, idle_loop, NULL, &l, "idle/%u", ci->ci_index);
	if (error != 0)
		panic("create_idle_lwp: error %d", error);
	lwp_lock(l);
	l->l_flag |= LW_IDLE;
	if (ci != lwp0.l_cpu) {
		/*
		 * For secondary CPUs, the idle LWP is the first to run, and
		 * it's directly entered from MD code without a trip through
		 * mi_switch().  Make the picture look good in case the CPU
		 * takes an interrupt before it calls idle_loop().
		 */
		l->l_stat = LSIDL;
		l->l_pflag |= LP_RUNNING;
		ci->ci_onproc = l;
	}
	lwp_unlock(l);
	ci->ci_data.cpu_idlelwp = l;

	return error;
}