xref: /minix3/minix/servers/sched/schedule.c (revision 433d6423c39e34ec4b79c950597bb2d236f886be)
1 /* This file contains the scheduling policy for SCHED
2  *
3  * The entry points are:
4  *   do_noquantum:        Called on behalf of processes that run out of quantum
5  *   do_start_scheduling  Request to start scheduling a proc
6  *   do_stop_scheduling   Request to stop scheduling a proc
7  *   do_nice		  Request to change the nice level on a proc
8  *   init_scheduling      Called from main.c to set up/prepare scheduling
9  */
10 #include "sched.h"
11 #include "schedproc.h"
12 #include <assert.h>
13 #include <minix/com.h>
14 #include <machine/archtypes.h>
15 #include "kernel/proc.h" /* for queue constants */
16 
17 static minix_timer_t sched_timer;
18 static unsigned balance_timeout;
19 
20 #define BALANCE_TIMEOUT	5 /* how often to balance queues in seconds */
21 
22 static int schedule_process(struct schedproc * rmp, unsigned flags);
23 static void balance_queues(minix_timer_t *tp);
24 
25 #define SCHEDULE_CHANGE_PRIO	0x1
26 #define SCHEDULE_CHANGE_QUANTUM	0x2
27 #define SCHEDULE_CHANGE_CPU	0x4
28 
29 #define SCHEDULE_CHANGE_ALL	(	\
30 		SCHEDULE_CHANGE_PRIO	|	\
31 		SCHEDULE_CHANGE_QUANTUM	|	\
32 		SCHEDULE_CHANGE_CPU		\
33 		)
34 
35 #define schedule_process_local(p)	\
36 	schedule_process(p, SCHEDULE_CHANGE_PRIO | SCHEDULE_CHANGE_QUANTUM)
37 #define schedule_process_migrate(p)	\
38 	schedule_process(p, SCHEDULE_CHANGE_CPU)
39 
40 #define CPU_DEAD	-1
41 
42 #define cpu_is_available(c)	(cpu_proc[c] >= 0)
43 
44 #define DEFAULT_USER_TIME_SLICE 200
45 
46 /* processes created by RS are sysytem processes */
47 #define is_system_proc(p)	((p)->parent == RS_PROC_NR)
48 
49 static unsigned cpu_proc[CONFIG_MAX_CPUS];
50 
51 static void pick_cpu(struct schedproc * proc)
52 {
53 #ifdef CONFIG_SMP
54 	unsigned cpu, c;
55 	unsigned cpu_load = (unsigned) -1;
56 
57 	if (machine.processors_count == 1) {
58 		proc->cpu = machine.bsp_id;
59 		return;
60 	}
61 
62 	/* schedule sysytem processes only on the boot cpu */
63 	if (is_system_proc(proc)) {
64 		proc->cpu = machine.bsp_id;
65 		return;
66 	}
67 
68 	/* if no other cpu available, try BSP */
69 	cpu = machine.bsp_id;
70 	for (c = 0; c < machine.processors_count; c++) {
71 		/* skip dead cpus */
72 		if (!cpu_is_available(c))
73 			continue;
74 		if (c != machine.bsp_id && cpu_load > cpu_proc[c]) {
75 			cpu_load = cpu_proc[c];
76 			cpu = c;
77 		}
78 	}
79 	proc->cpu = cpu;
80 	cpu_proc[cpu]++;
81 #else
82 	proc->cpu = 0;
83 #endif
84 }
85 
86 /*===========================================================================*
87  *				do_noquantum				     *
88  *===========================================================================*/
89 
90 int do_noquantum(message *m_ptr)
91 {
92 	register struct schedproc *rmp;
93 	int rv, proc_nr_n;
94 
95 	if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK) {
96 		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg %u.\n",
97 		m_ptr->m_source);
98 		return EBADEPT;
99 	}
100 
101 	rmp = &schedproc[proc_nr_n];
102 	if (rmp->priority < MIN_USER_Q) {
103 		rmp->priority += 1; /* lower priority */
104 	}
105 
106 	if ((rv = schedule_process_local(rmp)) != OK) {
107 		return rv;
108 	}
109 	return OK;
110 }
111 
112 /*===========================================================================*
113  *				do_stop_scheduling			     *
114  *===========================================================================*/
115 int do_stop_scheduling(message *m_ptr)
116 {
117 	register struct schedproc *rmp;
118 	int proc_nr_n;
119 
120 	/* check who can send you requests */
121 	if (!accept_message(m_ptr))
122 		return EPERM;
123 
124 	if (sched_isokendpt(m_ptr->m_lsys_sched_scheduling_stop.endpoint,
125 		    &proc_nr_n) != OK) {
126 		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg "
127 		"%d\n", m_ptr->m_lsys_sched_scheduling_stop.endpoint);
128 		return EBADEPT;
129 	}
130 
131 	rmp = &schedproc[proc_nr_n];
132 #ifdef CONFIG_SMP
133 	cpu_proc[rmp->cpu]--;
134 #endif
135 	rmp->flags = 0; /*&= ~IN_USE;*/
136 
137 	return OK;
138 }
139 
/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
/* Start scheduling a process, either with explicit parameters
 * (SCHEDULING_START, used for system processes) or inheriting them from
 * the parent (SCHEDULING_INHERIT).  Claims a schedproc slot, takes over
 * scheduling from the kernel via sys_schedctl(), picks a CPU and pushes
 * the initial parameters with schedule_process().  Replies with our own
 * endpoint in the "scheduler" field.  Returns OK or an error code.
 */
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to a free proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->m_lsys_sched_scheduling_start.endpoint,
			&proc_nr_n)) != OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint     = m_ptr->m_lsys_sched_scheduling_start.endpoint;
	rmp->parent       = m_ptr->m_lsys_sched_scheduling_start.parent;
	rmp->max_priority = m_ptr->m_lsys_sched_scheduling_start.maxprio;
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Special case for init: it is the first process scheduled and is
	 * its own parent, so there is nobody to inherit from.  Give it the
	 * default user queue and time slice here; the switch below may still
	 * override these for a SCHEDULING_START message. */
	if (rmp->endpoint == rmp->parent) {
		rmp->priority   = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since kernel never changes the cpu of a process, all are
		 * started on the BSP and the userspace scheduling hasn't
		 * changed that yet either, we can be sure that BSP is the
		 * processor where the processes run now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id;
		/* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority   = rmp->max_priority;
		rmp->time_slice = m_ptr->m_lsys_sched_scheduling_start.quantum;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since there
		 * is currently only one scheduler scheduling the whole system, this
		 * value is local and we assert that the parent endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->m_lsys_sched_scheduling_start.parent,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable: excluded by the assert at function entry */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the processes current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum.  If the chosen CPU
	 * turns out to be unschedulable (EBADCPU), mark it dead and retry
	 * on another one. */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parents scheduler. In case
	 * this scheduler would want to delegate scheduling to another
	 * scheduler, it could do so and then write the endpoint of that
	 * scheduler into the "scheduler" field.
	 */

	m_ptr->m_sched_lsys_scheduling_start.scheduler = SCHED_PROC_NR;

	return OK;
}
253 
254 /*===========================================================================*
255  *				do_nice					     *
256  *===========================================================================*/
257 int do_nice(message *m_ptr)
258 {
259 	struct schedproc *rmp;
260 	int rv;
261 	int proc_nr_n;
262 	unsigned new_q, old_q, old_max_q;
263 
264 	/* check who can send you requests */
265 	if (!accept_message(m_ptr))
266 		return EPERM;
267 
268 	if (sched_isokendpt(m_ptr->m_pm_sched_scheduling_set_nice.endpoint, &proc_nr_n) != OK) {
269 		printf("SCHED: WARNING: got an invalid endpoint in OoQ msg "
270 		"%d\n", m_ptr->m_pm_sched_scheduling_set_nice.endpoint);
271 		return EBADEPT;
272 	}
273 
274 	rmp = &schedproc[proc_nr_n];
275 	new_q = m_ptr->m_pm_sched_scheduling_set_nice.maxprio;
276 	if (new_q >= NR_SCHED_QUEUES) {
277 		return EINVAL;
278 	}
279 
280 	/* Store old values, in case we need to roll back the changes */
281 	old_q     = rmp->priority;
282 	old_max_q = rmp->max_priority;
283 
284 	/* Update the proc entry and reschedule the process */
285 	rmp->max_priority = rmp->priority = new_q;
286 
287 	if ((rv = schedule_process_local(rmp)) != OK) {
288 		/* Something went wrong when rescheduling the process, roll
289 		 * back the changes to proc struct */
290 		rmp->priority     = old_q;
291 		rmp->max_priority = old_max_q;
292 	}
293 
294 	return rv;
295 }
296 
297 /*===========================================================================*
298  *				schedule_process			     *
299  *===========================================================================*/
300 static int schedule_process(struct schedproc * rmp, unsigned flags)
301 {
302 	int err;
303 	int new_prio, new_quantum, new_cpu;
304 
305 	pick_cpu(rmp);
306 
307 	if (flags & SCHEDULE_CHANGE_PRIO)
308 		new_prio = rmp->priority;
309 	else
310 		new_prio = -1;
311 
312 	if (flags & SCHEDULE_CHANGE_QUANTUM)
313 		new_quantum = rmp->time_slice;
314 	else
315 		new_quantum = -1;
316 
317 	if (flags & SCHEDULE_CHANGE_CPU)
318 		new_cpu = rmp->cpu;
319 	else
320 		new_cpu = -1;
321 
322 	if ((err = sys_schedule(rmp->endpoint, new_prio,
323 		new_quantum, new_cpu)) != OK) {
324 		printf("PM: An error occurred when trying to schedule %d: %d\n",
325 		rmp->endpoint, err);
326 	}
327 
328 	return err;
329 }
330 
331 
332 /*===========================================================================*
333  *				start_scheduling			     *
334  *===========================================================================*/
335 
336 void init_scheduling(void)
337 {
338 	balance_timeout = BALANCE_TIMEOUT * sys_hz();
339 	init_timer(&sched_timer);
340 	set_timer(&sched_timer, balance_timeout, balance_queues, 0);
341 }
342 
343 /*===========================================================================*
344  *				balance_queues				     *
345  *===========================================================================*/
346 
347 /* This function in called every 100 ticks to rebalance the queues. The current
348  * scheduler bumps processes down one priority when ever they run out of
349  * quantum. This function will find all proccesses that have been bumped down,
350  * and pulls them back up. This default policy will soon be changed.
351  */
352 static void balance_queues(minix_timer_t *tp)
353 {
354 	struct schedproc *rmp;
355 	int proc_nr;
356 
357 	for (proc_nr=0, rmp=schedproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
358 		if (rmp->flags & IN_USE) {
359 			if (rmp->priority > rmp->max_priority) {
360 				rmp->priority -= 1; /* increase priority */
361 				schedule_process_local(rmp);
362 			}
363 		}
364 	}
365 
366 	set_timer(&sched_timer, balance_timeout, balance_queues, 0);
367 }
368