xref: /minix3/minix/kernel/clock.c (revision 0b98e8aad89f2bd4ba80b523d73cf29e9dd82ce1)
/* This file contains the clock task, which handles time related functions.
 * Important events that are handled by the CLOCK include setting and
 * monitoring alarm timers and deciding when to (re)schedule processes.
 * The CLOCK offers a direct interface to kernel processes. System services
 * can access its services through system calls, such as sys_setalarm(). The
 * CLOCK task is thus hidden from the outside world.
 *
 * Changes:
 *   Aug 18, 2006   removed direct hardware access etc, MinixPPC (Ingmar Alting)
 *   Oct 08, 2005   reordering and comment editing (A. S. Woodhull)
 *   Mar 18, 2004   clock interface moved to SYSTEM task (Jorrit N. Herder)
 *   Sep 30, 2004   source code documentation updated  (Jorrit N. Herder)
 *   Sep 24, 2004   redesigned alarm timers  (Jorrit N. Herder)
 *
 * The clock task is notified by the clock's interrupt handler when a timer
 * has expired.
 *
 * In addition to the main clock_task() entry point, which starts the main
 * loop, there are several other minor entry points:
 *   clock_stop:		called just before MINIX shutdown
 *   get_realtime:		get wall time since boot in clock ticks
 *   set_realtime:		set wall time since boot in clock ticks
 *   set_adjtime_delta:		set the number of ticks to adjust realtime
 *   get_monotonic:		get monotonic time since boot in clock ticks
 *   set_kernel_timer:		set a watchdog timer (+)
 *   reset_kernel_timer:	reset a watchdog timer (+)
 *   read_clock:		read the counter of channel 0 of the 8253A timer
 *
 * (+) The CLOCK task keeps track of watchdog timers for the entire kernel.
 * It is crucial that watchdog functions not block, or the CLOCK task itself
 * may be blocked. Do not send() a message when the receiver is not expecting
 * it; instead use notify(), which always returns.
 */
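
/* A hedged illustration of the sys_setalarm() interface mentioned above
 * (assumptions: the libsys wrapper takes an expiration time in ticks plus an
 * absolute/relative flag, and the expired alarm arrives as a notification
 * from the CLOCK endpoint; see <minix/syslib.h> for the authoritative
 * prototype). A system service might request a synchronous alarm like this:
 *
 *	sys_setalarm(ticks, 0);		// relative alarm, 'ticks' from now
 *	...
 *	if (is_ipc_notify(ipc_status) && m.m_source == CLOCK) {
 *		// the alarm has expired
 *	}
 */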

#include "kernel/kernel.h"
#include <minix/endpoint.h>
#include <assert.h>

#include "clock.h"

#ifdef USE_WATCHDOG
#include "watchdog.h"
#endif

/* Function prototypes for PRIVATE functions. */
static void load_update(void);

/* The CLOCK's timers queue. The functions in <minix/timers.h> operate on this.
 * Each system process possesses a single synchronous alarm timer. If other
 * kernel parts want to use additional timers, they must declare their own
 * persistent (static) timer structure, which can be passed to the clock
 * via (re)set_kernel_timer().
 * When a timer expires, its watchdog function is run by the CLOCK task.
 */
static minix_timer_t *clock_timers;	/* queue of CLOCK timers */
static clock_t next_timeout;	/* monotonic time that next timer expires */
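
/* A hedged usage sketch of the scheme described above (hypothetical names;
 * it assumes the tmr_func_t watchdog receives a pointer to the expired timer,
 * per <minix/timers.h>): a kernel component keeps a persistent timer and arms
 * it against the monotonic clock.
 *
 *	static minix_timer_t example_timer;
 *
 *	static void example_watchdog(minix_timer_t *tp)
 *	{
 *		// Runs when the timer expires; must not block. Re-arm for
 *		// one second (system_hz ticks) from now.
 *		set_kernel_timer(&example_timer,
 *			get_monotonic() + system_hz, example_watchdog);
 *	}
 *
 *	// Initial arming; reset_kernel_timer(&example_timer) would cancel it.
 *	set_kernel_timer(&example_timer, get_monotonic() + system_hz,
 *		example_watchdog);
 */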

/* The monotonic time, incremented by the interrupt handler on each clock
 * tick.
 */
static clock_t monotonic = 0;

/* Reflects the wall time and may be slowed down or sped up by adjclock().
 */
static clock_t realtime = 0;

/* Number of ticks to adjust realtime by. A negative value implies slowing
 * down realtime, a positive value implies speeding it up.
 */
static int32_t adjtime_delta = 0;

/*
 * The timer interrupt handler. It runs on all CPUs; on the boot processor it
 * additionally keeps real time and notifies the clock task if need be.
 */
int timer_int_handler(void)
{
	struct proc * p, * billp;

	/* FIXME watchdog for slave cpus! */
#ifdef USE_WATCHDOG
	/*
	 * We need to know whether local timer ticks are happening, i.e.
	 * whether the kernel is locked up or not. We don't care about
	 * overflows, as we only need to know that it is still ticking.
	 */
	watchdog_local_timer_ticks++;
#endif

	if (cpu_is_bsp(cpuid)) {
		monotonic++;

		/* If adjtime_delta has ticks remaining, consume one of them on
		 * every other interrupt: realtime then gets two ticks (to
		 * speed it up) or none (to slow it down) instead of the usual
		 * one, so wall time slews at roughly +/-50% until the full
		 * adjustment has been applied.
		 */
		if (adjtime_delta != 0 && monotonic & 0x1) {
			/* go forward or stay behind */
			realtime += (adjtime_delta > 0) ? 2 : 0;
			adjtime_delta += (adjtime_delta > 0) ? -1 : +1;
		} else {
			realtime++;
		}
	}

	/* Update user and system accounting times. Charge the current process
	 * for user time. If the current process is not billable, that is, if a
	 * non-user process is running, charge the billable process for system
	 * time as well.  Thus the unbillable process' user time is the billable
	 * user's system time.
	 */

	p = get_cpulocal_var(proc_ptr);
	billp = get_cpulocal_var(bill_ptr);

	p->p_user_time++;

	if (! (priv(p)->s_flags & BILLABLE)) {
		billp->p_sys_time++;
	}

	/* Decrement virtual timers, if applicable. We decrement both the
	 * virtual and the profile timer of the current process, and if the
	 * current process is not billable, the timer of the billed process as
	 * well.  If any of the timers expire, do_clocktick() will send out
	 * signals.
	 */
	if ((p->p_misc_flags & MF_VIRT_TIMER) && (p->p_virt_left > 0)) {
		p->p_virt_left--;
	}
	if ((p->p_misc_flags & MF_PROF_TIMER) && (p->p_prof_left > 0)) {
		p->p_prof_left--;
	}
	if (! (priv(p)->s_flags & BILLABLE) &&
			(billp->p_misc_flags & MF_PROF_TIMER) &&
			(billp->p_prof_left > 0)) {
		billp->p_prof_left--;
	}

	/*
	 * Check if a process-virtual timer expired. Check the current process,
	 * but also bill_ptr: one process's user time is another's system time,
	 * and the profile timer decreases for both!
	 */
	vtimer_check(p);

	if (p != billp)
		vtimer_check(billp);

	/* Update load average. */
	load_update();

	if (cpu_is_bsp(cpuid)) {
		/* If a timer expired, run the expired timers' watchdogs and
		 * recompute the next timeout.
		 */
		if (next_timeout <= monotonic) {
			tmrs_exptimers(&clock_timers, monotonic, NULL);
			next_timeout = (clock_timers == NULL) ?
				TMR_NEVER : clock_timers->tmr_exp_time;
		}

#ifdef DEBUG_SERIAL
		if (kinfo.do_serial_debug)
			do_ser_debug();
#endif

	}

	arch_timer_int_handler();

	return(1);					/* reenable interrupts */
}

/*===========================================================================*
 *				get_realtime				     *
 *===========================================================================*/
clock_t get_realtime(void)
{
  /* Get and return the current wall time in ticks since boot. */
  return(realtime);
}

/*===========================================================================*
 *				set_realtime				     *
 *===========================================================================*/
void set_realtime(clock_t newrealtime)
{
  realtime = newrealtime;
}

/*===========================================================================*
 *				set_adjtime_delta			     *
 *===========================================================================*/
void set_adjtime_delta(int32_t ticks)
{
  adjtime_delta = ticks;
}
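
/* Worked example with hypothetical numbers: with system_hz == 100, a caller
 * that wants wall time to gain one second passes ticks == 100. One tick of
 * the delta is consumed every other interrupt, so the adjustment takes about
 * 200 interrupts (two seconds), during which realtime runs at roughly 150%
 * of its normal rate; a negative delta slows it to roughly 50% instead.
 */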

/*===========================================================================*
 *				get_monotonic				     *
 *===========================================================================*/
clock_t get_monotonic(void)
{
  /* Get and return the number of ticks since boot. */
  return(monotonic);
}

/*===========================================================================*
 *				set_kernel_timer			     *
 *===========================================================================*/
void set_kernel_timer(
  minix_timer_t *tp,		/* pointer to timer structure */
  clock_t exp_time,		/* expiration monotonic time */
  tmr_func_t watchdog		/* watchdog to be called */
)
{
/* Insert the new timer in the active timers list. Always update the
 * next timeout time by setting it to the front of the active list.
 */
  tmrs_settimer(&clock_timers, tp, exp_time, watchdog, NULL);
  next_timeout = clock_timers->tmr_exp_time;
}

/*===========================================================================*
 *				reset_kernel_timer			     *
 *===========================================================================*/
void reset_kernel_timer(
  minix_timer_t *tp		/* pointer to timer structure */
)
{
/* The timer pointed to by 'tp' is no longer needed. Remove it from both the
 * active and expired lists. Always update the next timeout time by setting
 * it to the front of the active list.
 */
  tmrs_clrtimer(&clock_timers, tp, NULL);
  next_timeout = (clock_timers == NULL) ?
	TMR_NEVER : clock_timers->tmr_exp_time;
}

/*===========================================================================*
 *				load_update				     *
 *===========================================================================*/
static void load_update(void)
{
	u16_t slot;
	int enqueued = 0, q;
	struct proc *p;
	struct proc **rdy_head;

	/* Load average data is stored as a list of numbers in a circular
	 * buffer. Each slot accumulates _LOAD_UNIT_SECS seconds of samples of
	 * the number of runnable processes. Computations can then
	 * be made of the load average over variable periods, in the
	 * user library (see getloadavg(3)).
	 */
	slot = (monotonic / system_hz / _LOAD_UNIT_SECS) % _LOAD_HISTORY;
	if(slot != kloadinfo.proc_last_slot) {
		kloadinfo.proc_load_history[slot] = 0;
		kloadinfo.proc_last_slot = slot;
	}
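
	/* Worked example with hypothetical values: if system_hz == 100 and
	 * _LOAD_UNIT_SECS == 5, each slot covers 500 ticks, so at
	 * monotonic == 123456 the index is 123456 / 100 / 5 == 246, reduced
	 * modulo _LOAD_HISTORY. The reset above clears a slot the first time
	 * it is reused, so samples from a full history ago do not leak into
	 * the new period.
	 */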

	rdy_head = get_cpulocal_var(run_q_head);
	/* Accumulation. How many processes are ready now? */
	for(q = 0; q < NR_SCHED_QUEUES; q++) {
		for(p = rdy_head[q]; p != NULL; p = p->p_nextready) {
			enqueued++;
		}
	}

	kloadinfo.proc_load_history[slot] += enqueued;

	/* Record when this sample was taken. */
	kloadinfo.last_clock = monotonic;
}

int boot_cpu_init_timer(unsigned freq)
{
	if (init_local_timer(freq))
		return -1;

	if (register_local_timer_handler(
				(irq_handler_t) timer_int_handler))
		return -1;

	return 0;
}

int app_cpu_init_timer(unsigned freq)
{
	if (init_local_timer(freq))
		return -1;

	return 0;
}
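
/* These initializers are expected to run once per CPU during startup, with
 * the desired tick frequency (e.g. system_hz); the exact call sites live in
 * the architecture-specific and SMP boot code rather than in this file.
 */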
298