/* This file contains the scheduling policy for SCHED
 *
 * The entry points are:
 *   do_noquantum:        Called on behalf of processes that run out of quantum
 *   do_start_scheduling  Request to start scheduling a proc
 *   do_stop_scheduling   Request to stop scheduling a proc
 *   do_nice              Request to change the nice level on a proc
 *   init_scheduling      Called from main.c to set up/prepare scheduling
 */
#include "sched.h"
#include "schedproc.h"
#include <assert.h>
#include <minix/com.h>
#include <machine/archtypes.h>

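/* Queue-balancing period in clock ticks; computed from BALANCE_TIMEOUT and
 * sys_hz() in init_scheduling().
 */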
static unsigned balance_timeout;

#define BALANCE_TIMEOUT	5 /* how often to balance queues in seconds */

static int schedule_process(struct schedproc * rmp, unsigned flags);

#define SCHEDULE_CHANGE_PRIO	0x1
#define SCHEDULE_CHANGE_QUANTUM	0x2
#define SCHEDULE_CHANGE_CPU	0x4

#define SCHEDULE_CHANGE_ALL	(	\
		SCHEDULE_CHANGE_PRIO	|	\
		SCHEDULE_CHANGE_QUANTUM	|	\
		SCHEDULE_CHANGE_CPU		\
		)

#define schedule_process_local(p)	\
	schedule_process(p, SCHEDULE_CHANGE_PRIO | SCHEDULE_CHANGE_QUANTUM)
#define schedule_process_migrate(p)	\
	schedule_process(p, SCHEDULE_CHANGE_CPU)

#define CPU_DEAD	-1

#define cpu_is_available(c)	(cpu_proc[c] >= 0)

#define DEFAULT_USER_TIME_SLICE 200

/* processes created by RS are system processes */
#define is_system_proc(p)	((p)->parent == RS_PROC_NR)

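/* Per-CPU count of processes assigned by this scheduler; a slot is set to
 * CPU_DEAD (-1) once scheduling on that CPU fails, which is what
 * cpu_is_available() checks for.
 */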
static unsigned cpu_proc[CONFIG_MAX_CPUS];

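/* Pick a CPU for a process: system processes stay on the boot processor
 * (BSP); other processes go to the least-loaded non-BSP CPU according to
 * cpu_proc[], falling back to the BSP when no other CPU is available.
 */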
static void pick_cpu(struct schedproc * proc)
{
#ifdef CONFIG_SMP
	unsigned cpu, c;
	unsigned cpu_load = (unsigned) -1;

	if (machine.processors_count == 1) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* schedule system processes only on the boot cpu */
	if (is_system_proc(proc)) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* if no other cpu available, try BSP */
	cpu = machine.bsp_id;
	for (c = 0; c < machine.processors_count; c++) {
		/* skip dead cpus */
		if (!cpu_is_available(c))
			continue;
		if (c != machine.bsp_id && cpu_load > cpu_proc[c]) {
			cpu_load = cpu_proc[c];
			cpu = c;
		}
	}
	proc->cpu = cpu;
	cpu_proc[cpu]++;
#else
	proc->cpu = 0;
#endif
}

/*===========================================================================*
 *				do_noquantum				     *
 *===========================================================================*/

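/* A process has run out of its quantum.  Demote it by one queue, but not
 * below MIN_USER_Q, and hand it a fresh quantum via schedule_process_local();
 * balance_queues() later promotes it back towards its maximum priority.
 */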
int do_noquantum(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n;

	if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg %u.\n",
		m_ptr->m_source);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
	if (rmp->priority < MIN_USER_Q) {
		rmp->priority += 1; /* lower priority */
	}

	if ((rv = schedule_process_local(rmp)) != OK) {
		return rv;
	}
	return OK;
}

/*===========================================================================*
 *				do_stop_scheduling			     *
 *===========================================================================*/
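/* Stop scheduling a process: release its schedproc slot and, on SMP, drop
 * the load count of the CPU it was assigned to.
 */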
int do_stop_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int proc_nr_n;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_lsys_sched_scheduling_stop.endpoint,
		    &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in stop-scheduling msg "
			"%d\n", m_ptr->m_lsys_sched_scheduling_stop.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
#ifdef CONFIG_SMP
	cpu_proc[rmp->cpu]--;
#endif
	rmp->flags = 0; /*&= ~IN_USE;*/

	return OK;
}

/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
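/* Start scheduling a process.  SCHEDULING_START sets the priority and quantum
 * explicitly from the request (used for system processes), while
 * SCHEDULING_INHERIT copies them from the parent's slot.  The scheduler then
 * takes the process over from the kernel with sys_schedctl() and places it on
 * a CPU with an initial quantum.
 */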
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->m_lsys_sched_scheduling_start.endpoint,
			&proc_nr_n)) != OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint     = m_ptr->m_lsys_sched_scheduling_start.endpoint;
	rmp->parent       = m_ptr->m_lsys_sched_scheduling_start.parent;
	rmp->max_priority = m_ptr->m_lsys_sched_scheduling_start.maxprio;
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* A process that is its own parent cannot inherit scheduling
	 * parameters from anyone; handle that case first. */
	if (rmp->endpoint == rmp->parent) {
		/* We have a special case here for init, which is the first
		   process scheduled, and the parent of itself. */
		rmp->priority   = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since the kernel never changes the cpu of a process, all
		 * processes are started on the BSP and the userspace
		 * scheduler hasn't changed that yet either, so we can be sure
		 * that the BSP is the processor where the process runs now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id;
		/* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority   = rmp->max_priority;
		rmp->time_slice = m_ptr->m_lsys_sched_scheduling_start.quantum;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since there
		 * is currently only one scheduler scheduling the whole system, this
		 * value is local and we assert that the parent endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->m_lsys_sched_scheduling_start.parent,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the process's current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by the parent's scheduler. In case
	 * this scheduler would want to delegate scheduling to another
	 * scheduler, it could do so and then write the endpoint of that
	 * scheduler into the "scheduler" field.
	 */

	m_ptr->m_sched_lsys_scheduling_start.scheduler = SCHED_PROC_NR;

	return OK;
}

/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
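/* Change the nice value of a process: PM passes the new maximum priority
 * queue, which becomes both the current and the maximum priority.  If the
 * kernel rejects the reschedule, the old values are restored.
 */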
int do_nice(message *m_ptr)
{
	struct schedproc *rmp;
	int rv;
	int proc_nr_n;
	unsigned new_q, old_q, old_max_q;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_pm_sched_scheduling_set_nice.endpoint, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in set-nice msg "
			"%d\n", m_ptr->m_pm_sched_scheduling_set_nice.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
	new_q = m_ptr->m_pm_sched_scheduling_set_nice.maxprio;
	if (new_q >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Store old values, in case we need to roll back the changes */
	old_q     = rmp->priority;
	old_max_q = rmp->max_priority;

	/* Update the proc entry and reschedule the process */
	rmp->max_priority = rmp->priority = new_q;

	if ((rv = schedule_process_local(rmp)) != OK) {
		/* Something went wrong when rescheduling the process, roll
		 * back the changes to proc struct */
		rmp->priority     = old_q;
		rmp->max_priority = old_max_q;
	}

	return rv;
}

/*===========================================================================*
 *				schedule_process			     *
 *===========================================================================*/
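/* Push the scheduling parameters selected by 'flags' to the kernel via
 * sys_schedule().  Fields that are not selected are passed as -1, which the
 * kernel takes to mean "leave unchanged".
 */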
static int schedule_process(struct schedproc * rmp, unsigned flags)
{
	int err;
	int new_prio, new_quantum, new_cpu, niced;

	pick_cpu(rmp);

	if (flags & SCHEDULE_CHANGE_PRIO)
		new_prio = rmp->priority;
	else
		new_prio = -1;

	if (flags & SCHEDULE_CHANGE_QUANTUM)
		new_quantum = rmp->time_slice;
	else
		new_quantum = -1;

	if (flags & SCHEDULE_CHANGE_CPU)
		new_cpu = rmp->cpu;
	else
		new_cpu = -1;

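	/* A process counts as niced when its maximum priority queue is weaker
	 * (numerically higher) than the default USER_Q; this flag is passed to
	 * the kernel alongside the other scheduling parameters. */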
	niced = (rmp->max_priority > USER_Q);

	if ((err = sys_schedule(rmp->endpoint, new_prio,
		new_quantum, new_cpu, niced)) != OK) {
		printf("SCHED: An error occurred when trying to schedule %d: %d\n",
		rmp->endpoint, err);
	}

	return err;
}


/*===========================================================================*
 *				init_scheduling				     *
 *===========================================================================*/
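/* Called from main.c at startup: compute the balancing period in ticks and
 * arm the first alarm used to drive balance_queues().
 */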
void init_scheduling(void)
{
	int r;

	balance_timeout = BALANCE_TIMEOUT * sys_hz();

	if ((r = sys_setalarm(balance_timeout, 0)) != OK)
		panic("sys_setalarm failed: %d", r);
}

/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/

/* This function is called every N ticks to rebalance the queues. The current
 * scheduler bumps processes down one priority whenever they run out of
 * quantum. This function finds all processes that have been bumped down,
 * and pulls them back up. This default policy will soon be changed.
 */
void balance_queues(void)
{
	struct schedproc *rmp;
	int r, proc_nr;

	for (proc_nr=0, rmp=schedproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
		if (rmp->flags & IN_USE) {
			if (rmp->priority > rmp->max_priority) {
				rmp->priority -= 1; /* increase priority */
				schedule_process_local(rmp);
			}
		}
	}

	if ((r = sys_setalarm(balance_timeout, 0)) != OK)
		panic("sys_setalarm failed: %d", r);
}