/*	kern_clock.c	4.14	02/27/81	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"
#include "../h/cpu.h"

#include "dh.h"
#include "dz.h"

#define	SCHMAG	9/10
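
/*
 * N.B.: SCHMAG is only ever used as a trailing textual multiplier,
 * so x*SCHMAG expands to x*9/10, which C evaluates left to right
 * as (x*9)/10, i.e. a 10% decay (for p_cpu == 100 the result is
 * 90).  Evaluated on its own, 9/10 would be 0 in integer
 * arithmetic.
 */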

/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading the clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying the priority of the current process
 *	arranging for a soft clock interrupt
 *	profiling the kernel pc
 *
 * At softclock interrupt time we:
 *	implement callouts
 *	maintain the date
 *	issue the lightning bolt wakeup (every second)
 *	deliver alarm clock signals
 *	jab the scheduler
 *
 * On the VAX, softclock interrupts are implemented with
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but hardclock interrupts should never be lost.
 */
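
/*
 * Illustrative sketch (not part of this kernel) of the split the
 * comment above describes: the hard interrupt handler does only
 * bounded work and posts a soft interrupt; the soft handler, which
 * may absorb several postings in one pass, does the variable-length
 * work.  The names hardpend and post_softint are hypothetical.
 */
#ifdef notdef
int	hardpend;

hard_handler()
{
	hardpend++;		/* bounded bookkeeping only */
	post_softint();		/* analogous to setsoftclock() below */
}

soft_handler()
{
	while (hardpend > 0) {	/* several ticks may be folded together */
		hardpend--;
		/* variable-length work: callouts, scheduling, etc. */
	}
}
#endif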

/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int s, cpstate;

	/*
	 * reprime clock
	 */
	clkreld();

	/*
	 * update callout times
	 */
	if(callout[0].c_func == NULL)
		goto out;
	p1 = &callout[0];
	while(p1->c_time<=0 && p1->c_func!=NULL)
		p1++;
	p1->c_time--;
out:

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if(u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		cpstate = CP_SYS;
		if (noproc)
			cpstate = CP_IDLE;
		else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
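		/* saturate p_cpu at its maximum instead of wrapping to 0 */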
		if(++pp->p_cpu == 0)
			pp->p_cpu--;
		if(pp->p_cpu % 16 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	++lbolt;
#if VAX780
	if (cpu == VAX_780 && !BASEPRI(ps))
		unhang();
#endif
	setsoftclock();
}

/*
 * Constant for decay filter for cpu usage.
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */
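
/*
 * The filter applied once a second in softclock is
 *	p_pctcpu = ccpu * p_pctcpu + (1 - ccpu) * (p_cpticks/hz)
 * With ccpu = exp(-1/20), the estimate decays with a 20 second
 * time constant: a process that goes idle sees its %cpu fall to
 * 1/e (about 37%) of its old value after 20 seconds, since
 * ccpu**20 = exp(-1).  Illustrative sketch, plain arithmetic only:
 */
#ifdef notdef
double
pctcpu_decay_demo()
{
	double pct = 1.0;		/* was using 100% of the cpu */
	int sec;

	for (sec = 0; sec < 20; sec++)	/* now completely idle */
		pct = ccpu * pct + (1.0 - ccpu) * 0.0;
	return (pct);			/* ~0.37 */
}
#endif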

/*
 * Software clock interrupt.
 * This routine is blocked by spl1(),
 * which doesn't block device interrupts!
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1, *p2;
	register struct proc *pp;
	register int a, s;

	/*
	 * callout
	 */
	if(callout[0].c_time <= 0) {
		p1 = &callout[0];
		while(p1->c_func != 0 && p1->c_time <= 0) {
			(*p1->c_func)(p1->c_arg);
			p1++;
		}
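		/* compact the table, sliding the unexpired entries down */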
		p2 = &callout[0];
		while(p2->c_func = p1->c_func) {
			p2->c_time = p1->c_time;
			p2->c_arg = p1->c_arg;
			p1++;
			p2++;
		}
	}

	/*
	 * Drain silos.
	 */
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon and reschedule every 1/4 sec.
	 */
	if (lbolt % (hz/4) == 0) {
		vmpago();
		runrun++;
		aston();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= hz) {
		if (BASEPRI(ps))
			return;
		lbolt -= hz;
		++time;
		wakeup((caddr_t)&lbolt);
		for(pp = proc; pp < procNPROC; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
			if(pp->p_time != 127)
				pp->p_time++;
			if(pp->p_clktim)
				if(--pp->p_clktim == 0)
					if (pp->p_flag & STIMO) {
						s = spl6();
						switch (pp->p_stat) {

						case SSLEEP:
							setrun(pp);
							break;

						case SSTOP:
							unsleep(pp);
							break;
						}
						pp->p_flag &= ~STIMO;
						splx(s);
					} else
						psignal(pp, SIGALRM);
			if(pp->p_stat==SSLEEP||pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
			pp->p_cpticks = 0;
			a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
			if(a < 0)
				a = 0;
			if(a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			s = spl6();
			if(pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}
		vmmeter();
		if(runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}
		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);
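		/*
		 * Autonice: a non-root process still at normal priority
		 * after 10 minutes (600*hz ticks) of user cpu time is
		 * quietly degraded to nice 4.
		 */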
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid)
				if (pp->p_nice == NZERO && u.u_vm.vm_utime > 600 * hz)
					pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
	}
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is sorted into the callout
 * structure.  The time in each entry
 * is the number of ticks more than
 * that of the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2, *p3;
	register int t;
	int s;

	t = tim;
	p1 = &callout[0];
	s = spl7();
	while(p1->c_func != 0 && p1->c_time <= t) {
		t -= p1->c_time;
		p1++;
	}
	p1->c_time -= t;
	p2 = p1;
	p3 = callout+(ncallout-2);
	while(p2->c_func != 0) {
		if (p2 >= p3)
			panic("timeout");
		p2++;
	}
	while(p2 >= p1) {
		(p2+1)->c_time = p2->c_time;
		(p2+1)->c_func = p2->c_func;
		(p2+1)->c_arg = p2->c_arg;
		p2--;
	}
	p1->c_time = t;
	p1->c_func = fun;
	p1->c_arg = arg;
	splx(s);
}
336