xref: /minix3/minix/servers/sched/schedule.c (revision 433d6423c39e34ec4b79c950597bb2d236f886be)
/* This file contains the scheduling policy for SCHED
 *
 * The entry points are:
 *   do_noquantum:        Called on behalf of processes that run out of quantum
 *   do_start_scheduling  Request to start scheduling a proc
 *   do_stop_scheduling   Request to stop scheduling a proc
 *   do_nice              Request to change the nice level on a proc
 *   init_scheduling      Called from main.c to set up/prepare scheduling
 */
#include "sched.h"
#include "schedproc.h"
#include <assert.h>
#include <minix/com.h>
#include <machine/archtypes.h>
#include "kernel/proc.h" /* for queue constants */

static minix_timer_t sched_timer;
static unsigned balance_timeout;

#define BALANCE_TIMEOUT	5 /* how often to balance queues in seconds */

static int schedule_process(struct schedproc * rmp, unsigned flags);
static void balance_queues(minix_timer_t *tp);

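/* Flags that tell schedule_process() which scheduling parameters to push to
 * the kernel; any parameter that is not selected is passed to sys_schedule()
 * as -1, meaning "leave unchanged". */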
#define SCHEDULE_CHANGE_PRIO	0x1
#define SCHEDULE_CHANGE_QUANTUM	0x2
#define SCHEDULE_CHANGE_CPU	0x4

#define SCHEDULE_CHANGE_ALL	(	\
		SCHEDULE_CHANGE_PRIO	|	\
		SCHEDULE_CHANGE_QUANTUM	|	\
		SCHEDULE_CHANGE_CPU		\
		)

#define schedule_process_local(p)	\
	schedule_process(p, SCHEDULE_CHANGE_PRIO | SCHEDULE_CHANGE_QUANTUM)
#define schedule_process_migrate(p)	\
	schedule_process(p, SCHEDULE_CHANGE_CPU)

#define CPU_DEAD	-1

#define cpu_is_available(c)	(cpu_proc[c] >= 0)

#define DEFAULT_USER_TIME_SLICE 200
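/* Default quantum handed to user processes; the kernel tracks quantum sizes
 * in milliseconds, so this should amount to 200 ms. */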

/* processes created by RS are system processes */
#define is_system_proc(p)	((p)->parent == RS_PROC_NR)

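/* Number of processes this scheduler has placed on each CPU.  The array must
 * be signed: a slot set to CPU_DEAD (-1) marks a CPU the kernel refused
 * (EBADCPU), and cpu_is_available() relies on that value being negative. */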
static int cpu_proc[CONFIG_MAX_CPUS];

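/* Pick a CPU for a process: system processes stay on the boot processor
 * (BSP); everything else goes to the available CPU that currently has the
 * fewest processes assigned by this scheduler. */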
static void pick_cpu(struct schedproc * proc)
{
#ifdef CONFIG_SMP
	unsigned cpu, c;
	unsigned cpu_load = (unsigned) -1;

	if (machine.processors_count == 1) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* schedule system processes only on the boot cpu */
	if (is_system_proc(proc)) {
		proc->cpu = machine.bsp_id;
		return;
	}

	/* if no other cpu available, try BSP */
	cpu = machine.bsp_id;
	for (c = 0; c < machine.processors_count; c++) {
		/* skip dead cpus */
		if (!cpu_is_available(c))
			continue;
		if (c != machine.bsp_id && cpu_load > cpu_proc[c]) {
			cpu_load = cpu_proc[c];
			cpu = c;
		}
	}
	proc->cpu = cpu;
	cpu_proc[cpu]++;
#else
	proc->cpu = 0;
#endif
}

/*===========================================================================*
 *				do_noquantum				     *
 *===========================================================================*/

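/* The kernel notifies us that a process has used up its quantum.  The default
 * policy demotes it by one priority queue, but never past MIN_USER_Q, and
 * then pushes the new priority plus a fresh quantum back to the kernel. */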
int do_noquantum(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n;

	if (sched_isokendpt(m_ptr->m_source, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in OOQ msg %d.\n",
		m_ptr->m_source);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
	if (rmp->priority < MIN_USER_Q) {
		rmp->priority += 1; /* lower priority */
	}

	if ((rv = schedule_process_local(rmp)) != OK) {
		return rv;
	}
	return OK;
}

/*===========================================================================*
 *				do_stop_scheduling			     *
 *===========================================================================*/
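/* PM or RS asks us to stop scheduling a process, typically because it is
 * exiting.  Free its schedproc slot and, on SMP, drop the load count of the
 * CPU it was assigned to. */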
int do_stop_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int proc_nr_n;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_lsys_sched_scheduling_stop.endpoint,
		    &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in stop scheduling msg "
		"%d\n", m_ptr->m_lsys_sched_scheduling_stop.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
#ifdef CONFIG_SMP
	cpu_proc[rmp->cpu]--;
#endif
	rmp->flags = 0; /*&= ~IN_USE;*/

	return OK;
}

/*===========================================================================*
 *				do_start_scheduling			     *
 *===========================================================================*/
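/* PM or RS hands us a process to schedule.  SCHEDULING_START carries an
 * explicit priority and quantum (used for system processes), while
 * SCHEDULING_INHERIT copies both from the parent.  In either case we claim
 * the process with sys_schedctl() and then give it its first quantum. */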
int do_start_scheduling(message *m_ptr)
{
	register struct schedproc *rmp;
	int rv, proc_nr_n, parent_nr_n;

	/* we can handle two kinds of messages here */
	assert(m_ptr->m_type == SCHEDULING_START ||
		m_ptr->m_type == SCHEDULING_INHERIT);

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	/* Resolve endpoint to proc slot. */
	if ((rv = sched_isemtyendpt(m_ptr->m_lsys_sched_scheduling_start.endpoint,
			&proc_nr_n)) != OK) {
		return rv;
	}
	rmp = &schedproc[proc_nr_n];

	/* Populate process slot */
	rmp->endpoint     = m_ptr->m_lsys_sched_scheduling_start.endpoint;
	rmp->parent       = m_ptr->m_lsys_sched_scheduling_start.parent;
	rmp->max_priority = m_ptr->m_lsys_sched_scheduling_start.maxprio;
	if (rmp->max_priority >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	if (rmp->endpoint == rmp->parent) {
		/* We have a special case here for init, which is the first
		   process scheduled, and the parent of itself. */
		rmp->priority   = USER_Q;
		rmp->time_slice = DEFAULT_USER_TIME_SLICE;

		/*
		 * Since the kernel never changes the cpu of a process, all
		 * processes are started on the BSP, and userspace scheduling
		 * has not changed that yet either, so we can be sure that the
		 * BSP is the processor where the processes run now.
		 */
#ifdef CONFIG_SMP
		rmp->cpu = machine.bsp_id;
		/* FIXME set the cpu mask */
#endif
	}

	switch (m_ptr->m_type) {

	case SCHEDULING_START:
		/* We have a special case here for system processes, for which
		 * quantum and priority are set explicitly rather than inherited
		 * from the parent */
		rmp->priority   = rmp->max_priority;
		rmp->time_slice = m_ptr->m_lsys_sched_scheduling_start.quantum;
		break;

	case SCHEDULING_INHERIT:
		/* Inherit current priority and time slice from parent. Since there
		 * is currently only one scheduler scheduling the whole system, this
		 * value is local and we assert that the parent endpoint is valid */
		if ((rv = sched_isokendpt(m_ptr->m_lsys_sched_scheduling_start.parent,
				&parent_nr_n)) != OK)
			return rv;

		rmp->priority = schedproc[parent_nr_n].priority;
		rmp->time_slice = schedproc[parent_nr_n].time_slice;
		break;

	default:
		/* not reachable */
		assert(0);
	}

	/* Take over scheduling the process. The kernel reply message populates
	 * the process's current priority and its time slice */
	if ((rv = sys_schedctl(0, rmp->endpoint, 0, 0, 0)) != OK) {
		printf("Sched: Error taking over scheduling for %d, kernel said %d\n",
			rmp->endpoint, rv);
		return rv;
	}
	rmp->flags = IN_USE;

	/* Schedule the process, giving it some quantum */
	pick_cpu(rmp);
	while ((rv = schedule_process(rmp, SCHEDULE_CHANGE_ALL)) == EBADCPU) {
		/* don't try this CPU ever again */
		cpu_proc[rmp->cpu] = CPU_DEAD;
		pick_cpu(rmp);
	}

	if (rv != OK) {
		printf("Sched: Error while scheduling process, kernel replied %d\n",
			rv);
		return rv;
	}

	/* Mark ourselves as the new scheduler.
	 * By default, processes are scheduled by their parent's scheduler. In
	 * case this scheduler wants to delegate scheduling to another
	 * scheduler, it could do so and then write the endpoint of that
	 * scheduler into the "scheduler" field.
	 */

	m_ptr->m_sched_lsys_scheduling_start.scheduler = SCHED_PROC_NR;

	return OK;
}

/*===========================================================================*
 *				do_nice					     *
 *===========================================================================*/
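/* PM relays a nice() or setpriority() request.  The maxprio field already
 * holds the target scheduling queue (PM converts nice values to queues), so
 * we adopt it as both the current and the maximum priority, rolling back if
 * the kernel rejects the change. */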
int do_nice(message *m_ptr)
{
	struct schedproc *rmp;
	int rv;
	int proc_nr_n;
	unsigned new_q, old_q, old_max_q;

	/* check who can send you requests */
	if (!accept_message(m_ptr))
		return EPERM;

	if (sched_isokendpt(m_ptr->m_pm_sched_scheduling_set_nice.endpoint, &proc_nr_n) != OK) {
		printf("SCHED: WARNING: got an invalid endpoint in nice msg "
		"%d\n", m_ptr->m_pm_sched_scheduling_set_nice.endpoint);
		return EBADEPT;
	}

	rmp = &schedproc[proc_nr_n];
	new_q = m_ptr->m_pm_sched_scheduling_set_nice.maxprio;
	if (new_q >= NR_SCHED_QUEUES) {
		return EINVAL;
	}

	/* Store old values, in case we need to roll back the changes */
	old_q     = rmp->priority;
	old_max_q = rmp->max_priority;

	/* Update the proc entry and reschedule the process */
	rmp->max_priority = rmp->priority = new_q;

	if ((rv = schedule_process_local(rmp)) != OK) {
		/* Something went wrong when rescheduling the process, roll
		 * back the changes to proc struct */
		rmp->priority     = old_q;
		rmp->max_priority = old_max_q;
	}

	return rv;
}

/*===========================================================================*
 *				schedule_process			     *
 *===========================================================================*/
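/* Push the selected subset of {priority, quantum, cpu} to the kernel through
 * sys_schedule().  A parameter of -1 tells the kernel to leave that field
 * unchanged. */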
static int schedule_process(struct schedproc * rmp, unsigned flags)
{
	int err;
	int new_prio, new_quantum, new_cpu;

	pick_cpu(rmp);

	if (flags & SCHEDULE_CHANGE_PRIO)
		new_prio = rmp->priority;
	else
		new_prio = -1;

	if (flags & SCHEDULE_CHANGE_QUANTUM)
		new_quantum = rmp->time_slice;
	else
		new_quantum = -1;

	if (flags & SCHEDULE_CHANGE_CPU)
		new_cpu = rmp->cpu;
	else
		new_cpu = -1;

	if ((err = sys_schedule(rmp->endpoint, new_prio,
		new_quantum, new_cpu)) != OK) {
		printf("SCHED: An error occurred when trying to schedule %d: %d\n",
		rmp->endpoint, err);
	}

	return err;
}


/*===========================================================================*
 *				init_scheduling				     *
 *===========================================================================*/

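/* Called once from main() at startup: convert BALANCE_TIMEOUT to ticks and
 * arm the timer that periodically runs balance_queues(). */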
void init_scheduling(void)
{
	balance_timeout = BALANCE_TIMEOUT * sys_hz();
	init_timer(&sched_timer);
	set_timer(&sched_timer, balance_timeout, balance_queues, 0);
}

/*===========================================================================*
 *				balance_queues				     *
 *===========================================================================*/

/* This function is called every balance_timeout ticks (BALANCE_TIMEOUT
 * seconds) to rebalance the queues. The current scheduler bumps processes down
 * one priority whenever they run out of quantum. This function finds all
 * processes that have been bumped down, and pulls them back up. This default
 * policy will soon be changed.
 */
static void balance_queues(minix_timer_t *tp)
{
	struct schedproc *rmp;
	int proc_nr;

	for (proc_nr=0, rmp=schedproc; proc_nr < NR_PROCS; proc_nr++, rmp++) {
		if (rmp->flags & IN_USE) {
			if (rmp->priority > rmp->max_priority) {
				rmp->priority -= 1; /* increase priority */
				schedule_process_local(rmp);
			}
		}
	}

	set_timer(&sched_timer, balance_timeout, balance_queues, 0);
}