/*	$OpenBSD: kern_sched.c,v 1.4 2008/06/10 20:14:36 beck Exp $	*/
/*
 * Copyright (c) 2007 Artur Grabowski <art@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/param.h>

#include <sys/sched.h>
#include <sys/proc.h>
#include <sys/kthread.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/mutex.h>

#include <uvm/uvm_extern.h>

void sched_kthreads_create(void *);
void sched_idle(void *);

/*
 * A few notes about cpu_switchto, which is implemented in MD code.
 *
 * cpu_switchto takes two arguments, the old proc and the proc
 * it should switch to. The new proc will never be NULL, so we always have
 * a saved state that we need to switch to. The old proc however can
 * be NULL if the process is exiting. NULL for the old proc simply
 * means "don't bother saving old state".
 *
 * cpu_switchto is supposed to atomically load the new state of the process,
 * including the pcb and pmap, and to set curproc, the p_cpu pointer in the
 * proc and p_stat to SONPROC. This is atomic only with respect to
 * interrupts; other cpus in the system must not depend on this state
 * being consistent. Therefore no locking is necessary in cpu_switchto
 * other than blocking interrupts during the context switch.
 */
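
/*
 * Illustrative sketch only, not part of this file: roughly what an MD
 * cpu_switchto(old, new) has to do to honor the contract described above.
 * The md_save_context/md_set_curproc/md_load_context helpers are
 * hypothetical placeholders for the machine-dependent pcb/pmap/register
 * work; real implementations are typically assembly.
 */
#if 0
void
cpu_switchto(struct proc *old, struct proc *new)
{
	int s = splhigh();		/* only "locking": block interrupts */

	if (old != NULL)
		md_save_context(old);	/* hypothetical: stash old pcb state */

	/*
	 * Load the new state.  Atomic only with respect to interrupts on
	 * this cpu; other cpus may observe it half-done.
	 */
	new->p_cpu = curcpu();
	new->p_stat = SONPROC;
	md_set_curproc(new);		/* hypothetical: make curproc point at new */
	md_load_context(new);		/* hypothetical: switch pmap, restore pcb */

	splx(s);
}
#endif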

/*
 * sched_init_cpu is called from main() for the boot cpu, then it's the
 * responsibility of the MD code to call it for all other cpus.
 */
void
sched_init_cpu(struct cpu_info *ci)
{
	struct schedstate_percpu *spc = &ci->ci_schedstate;

	spc->spc_idleproc = NULL;

	kthread_create_deferred(sched_kthreads_create, ci);

	LIST_INIT(&spc->spc_deadproc);
}

void
sched_kthreads_create(void *v)
{
	struct cpu_info *ci = v;
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	static int num;

	if (kthread_create(sched_idle, ci, &spc->spc_idleproc, "idle%d", num))
		panic("fork idle");

	num++;
}

void
sched_idle(void *v)
{
	struct proc *p = curproc;
	struct cpu_info *ci = v;
	int s;

	KERNEL_PROC_UNLOCK(p);

	/*
	 * The first time we enter here we're not supposed to idle,
	 * just go away for a while.
	 */
	SCHED_LOCK(s);
	p->p_stat = SSLEEP;
	mi_switch();
	SCHED_UNLOCK(s);

	while (1) {
		KASSERT(ci == curcpu());
		KASSERT(curproc == ci->ci_schedstate.spc_idleproc);

		while (!sched_is_idle()) {
			struct schedstate_percpu *spc = &ci->ci_schedstate;
			struct proc *dead;

			SCHED_LOCK(s);
			p->p_stat = SSLEEP;
			mi_switch();
			SCHED_UNLOCK(s);

			while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
				LIST_REMOVE(dead, p_hash);
				exit2(dead);
			}
		}

		splassert(IPL_NONE);

		cpu_idle_enter();
		while (sched_is_idle())
			cpu_idle_cycle();
		cpu_idle_leave();
	}
}

/*
 * To free our address space we have to jump through a few hoops.
 * The freeing is done by the reaper, but until we have one reaper
 * per cpu, we have no way of putting this proc on the deadproc list
 * and waking up the reaper without risking having our address space and
 * stack torn from under us before we manage to switch to another proc.
 * Therefore we have a per-cpu list of dead processes where we put this
 * proc and have idle clean up that list and move it to the reaper list.
 * All this will be unnecessary once we can bind the reaper to this cpu
 * and not risk having it switch to another in case it sleeps.
 */
void
sched_exit(struct proc *p)
{
	struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
	struct timeval tv;
	struct proc *idle;
	int s;

	microuptime(&tv);
	timersub(&tv, &spc->spc_runtime, &tv);
	timeradd(&p->p_rtime, &tv, &p->p_rtime);

	LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);

#ifdef MULTIPROCESSOR
	KASSERT(__mp_lock_held(&kernel_lock) == 0);
#endif

	SCHED_LOCK(s);
	idle = spc->spc_idleproc;
	idle->p_stat = SRUN;
	cpu_switchto(NULL, idle);
}
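
/*
 * Standalone userland illustration (not kernel code) of the deadproc
 * handoff described above: one side pushes entries onto a LIST(3) with
 * LIST_INSERT_HEAD, the other drains it with LIST_FIRST/LIST_REMOVE,
 * much as sched_exit() and sched_idle() do with spc_deadproc.  All names
 * here are made up for the example.
 */
#if 0
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct dead_entry {
	int			 id;
	LIST_ENTRY(dead_entry)	 link;
};

LIST_HEAD(, dead_entry) deadlist = LIST_HEAD_INITIALIZER(deadlist);

int
main(void)
{
	struct dead_entry *e;
	int i;

	/* "sched_exit" side: park a few entries on the per-cpu list. */
	for (i = 0; i < 3; i++) {
		if ((e = malloc(sizeof(*e))) == NULL)
			return (1);
		e->id = i;
		LIST_INSERT_HEAD(&deadlist, e, link);
	}

	/* "sched_idle" side: drain the list and dispose of each entry. */
	while ((e = LIST_FIRST(&deadlist)) != NULL) {
		LIST_REMOVE(e, link);
		printf("reaping entry %d\n", e->id);
		free(e);
	}
	return (0);
}
#endif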

/*
 * Run queue management.
 *
 * Run queue management works just like before, except that it now uses
 * slightly more modern queue handling.
 */

TAILQ_HEAD(prochead, proc) sched_qs[NQS];
volatile int sched_whichqs;

void
sched_init_runqueues(void)
{
	int i;

	for (i = 0; i < NQS; i++)
		TAILQ_INIT(&sched_qs[i]);

#ifdef MULTIPROCESSOR
	__mp_lock_init(&sched_lock);
#endif
}

void
setrunqueue(struct proc *p)
{
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();

	TAILQ_INSERT_TAIL(&sched_qs[queue], p, p_runq);
	sched_whichqs |= (1 << queue);
}

void
remrunqueue(struct proc *p)
{
	int queue = p->p_priority >> 2;

	SCHED_ASSERT_LOCKED();

	TAILQ_REMOVE(&sched_qs[queue], p, p_runq);
	if (TAILQ_EMPTY(&sched_qs[queue]))
		sched_whichqs &= ~(1 << queue);
}

struct proc *
sched_chooseproc(void)
{
	struct proc *p;
	int queue;

	SCHED_ASSERT_LOCKED();

again:
	if (sched_is_idle()) {
		p = curcpu()->ci_schedstate.spc_idleproc;
		if (p == NULL) {
			int s;
			/*
			 * We get here if someone decides to switch during
			 * boot before forking kthreads, bleh.
			 * This is kind of like a stupid idle loop.
			 */
#ifdef MULTIPROCESSOR
			__mp_unlock(&sched_lock);
#endif
			spl0();
			delay(10);
			SCHED_LOCK(s);
			goto again;
		}
		KASSERT(p);
		p->p_stat = SRUN;
	} else {
		queue = ffs(sched_whichqs) - 1;
		p = TAILQ_FIRST(&sched_qs[queue]);
		TAILQ_REMOVE(&sched_qs[queue], p, p_runq);
		if (TAILQ_EMPTY(&sched_qs[queue]))
			sched_whichqs &= ~(1 << queue);
	}

	return (p);
}
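
/*
 * Standalone userland illustration (not kernel code) of the bitmap trick
 * used by the run queue code above: bit n of a "whichqs" word is set
 * whenever queue n (of the NQS queues) is non-empty, so the best
 * (lowest-numbered) non-empty queue can be found with ffs(3) instead of
 * scanning every queue.  Names and values are made up for the example.
 */
#if 0
#include <stdio.h>
#include <strings.h>

int
main(void)
{
	int whichqs = 0;
	int prio, queue;

	/* "setrunqueue": map a priority to a queue and mark it non-empty. */
	for (prio = 0; prio < 128; prio += 50) {
		queue = prio >> 2;		/* four priorities per queue */
		whichqs |= (1 << queue);
		printf("priority %3d -> queue %2d\n", prio, queue);
	}

	/* "sched_chooseproc": pick the best non-empty queue until none left. */
	while (whichqs != 0) {
		queue = ffs(whichqs) - 1;	/* lowest set bit = best queue */
		printf("choosing from queue %d\n", queue);
		whichqs &= ~(1 << queue);	/* pretend that queue drained */
	}
	return (0);
}
#endif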