1*2442Swnj /* kern_clock.c 4.7 02/15/81 */ 29Sbill 39Sbill #include "../h/param.h" 49Sbill #include "../h/systm.h" 5329Sbill #include "../h/dk.h" 69Sbill #include "../h/callo.h" 79Sbill #include "../h/seg.h" 89Sbill #include "../h/dir.h" 99Sbill #include "../h/user.h" 109Sbill #include "../h/proc.h" 119Sbill #include "../h/reg.h" 129Sbill #include "../h/psl.h" 139Sbill #include "../h/vm.h" 149Sbill #include "../h/buf.h" 159Sbill #include "../h/text.h" 16877Sbill #include "../h/vlimit.h" 17877Sbill #include "../h/mtpr.h" 18877Sbill #include "../h/clock.h" 199Sbill 201943Swnj #include "dh.h" 211943Swnj #include "dz.h" 221559Sbill 239Sbill #define SCHMAG 9/10 249Sbill 259Sbill 269Sbill /* 27*2442Swnj * Hardclock is called straight from 289Sbill * the real time clock interrupt. 29*2442Swnj * We limit the work we do at real clock interrupt time to: 30*2442Swnj * reloading clock 31*2442Swnj * decrementing time to callouts 32*2442Swnj * recording cpu time usage 33*2442Swnj * modifying priority of current processing 34*2442Swnj * arrange for soft clock interrupt 35*2442Swnj * kernel pc profiling 369Sbill * 37*2442Swnj * At softclock interrupt time we: 389Sbill * implement callouts 399Sbill * maintain date 409Sbill * lightning bolt wakeup (every second) 419Sbill * alarm clock signals 429Sbill * jab the scheduler 43*2442Swnj * 44*2442Swnj * On the vax softclock interrupts are implemented by 45*2442Swnj * software interrupts. Note that we may have multiple softclock 46*2442Swnj * interrupts compressed into one (due to excessive interrupt load), 47*2442Swnj * but that hardclock interrupts should never be lost. 
 */
#ifdef KPROF
/*
 * Kernel pc sample counters for kernel profiling;
 * indexed by (pc & 0x7fffffff) / 4 in hardclock() below.
 */
unsigned short kcount[20000];
#endif

/*
 * The hard (real time) clock interrupt.
 * pc and ps are the interrupted program counter and processor
 * status, used to distinguish user from kernel mode (USERMODE)
 * and to find the interrupted pc for kernel profiling.
 */
hardclock(pc, ps)
{
	register struct callo *p1;
	register struct proc *pp;
	register long *ip;		/* NOTE(review): unused in this routine */
	register int s, cpstate;

	/*
	 * reprime clock
	 */
	clkreld();

	/*
	 * update callout times
	 *
	 * The callout table is a delta list: each c_time is relative
	 * to the entries before it, so decrementing the first entry
	 * that still has time left ages the entire list.  Entries
	 * already at or below zero are ripe and are run by softclock().
	 */
	if(callout[0].c_func == NULL)
		goto out;
	p1 = &callout[0];
	while(p1->c_time<=0 && p1->c_func!=NULL)
		p1++;
	/*
	 * NOTE(review): if every entry is already ripe this decrements
	 * the terminating (c_func == NULL) slot's c_time -- presumably
	 * harmless since the sentinel is never run, but worth confirming.
	 */
	p1->c_time--;
out:

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		/* charge the resident set size (data/stack, plus text if any) */
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
		/*
		 * Enforce the cpu time limit: post SIGXCPU when exceeded,
		 * then stretch the limit by 5 seconds so the signal
		 * recurs periodically instead of every tick.
		 */
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/HZ > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	/*
	 * Charge the tick to user, nice-user, system or idle time
	 * for the cp_time[] state counters.
	 */
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if(u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		cpstate = CP_SYS;
		if (noproc)
			cpstate = CP_IDLE;
		else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	/* count a tick against each disk drive currently busy */
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		/* saturate p_cpu rather than letting it wrap to zero */
		if(++pp->p_cpu == 0)
			pp->p_cpu--;
		/* recompute priority every 16 ticks of accumulated cpu */
		if(pp->p_cpu % 16 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	++lbolt;
#ifdef KPROF
	/* kernel pc profiling: bump this pc's counter, sticking at the maximum */
	if (!USERMODE(ps) && !noproc) {
		register int indx = ((int)pc & 0x7fffffff) / 4;

		if (indx >= 0 && indx < 20000)
			if (++kcount[indx] == 0)
				--kcount[indx];
	}
#endif
#if VAX==780
	if (!BASEPRI(ps))
		unhang();
#endif
	/* request a softclock() interrupt to do the heavier work */
	setsoftclock();
}

/*
 * Constant for decay filter for cpu usage.
 */
double ccpu = 0.95122942450071400909;	/* exp(-1/20) */

/*
 * Software clock interrupt.
 * This routine is blocked by spl1(),
 * which doesn't block device interrupts!
 */
softclock(pc, ps)
caddr_t pc;
{
	register struct callo *p1, *p2;
	register struct proc *pp;
	register int a, s;

	/*
	 * callout: run every ripe entry (c_time <= 0) at the head of
	 * the delta list, then compact the survivors to the front.
	 */
	if(callout[0].c_time <= 0) {
		p1 = &callout[0];
		while(p1->c_func != 0 && p1->c_time <= 0) {
			(*p1->c_func)(p1->c_arg);
			p1++;
		}
		p2 = &callout[0];
		while(p2->c_func = p1->c_func) {	/* assignment intended */
			p2->c_time = p1->c_time;
			p2->c_arg = p1->c_arg;
			p1++;
			p2++;
		}
	}

	/*
	 * Drain silos.
	 */
#if NDH11 > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ11 > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * Run paging daemon and reschedule every 1/4 sec.
	 */
	if (lbolt % (HZ/4) == 0) {
		vmpago();
		runrun++;
		aston();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= HZ) {
		/*
		 * If we interrupted high-priority code, put the
		 * once-a-second work off until a later tick; lbolt
		 * keeps its value so we will come back here.
		 */
		if (BASEPRI(ps))
			return;
		lbolt -= HZ;
		++time;
		wakeup((caddr_t)&lbolt);	/* the "lightning bolt" */
		for(pp = &proc[0]; pp < &proc[NPROC]; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
			/* bump p_time, saturating at 127 */
			if(pp->p_time != 127)
				pp->p_time++;
			/*
			 * Alarm clock: when the countdown expires, either
			 * wake a process sleeping with a timeout (STIMO)
			 * or deliver SIGALRM.
			 */
			if(pp->p_clktim)
				if(--pp->p_clktim == 0)
					if (pp->p_flag & STIMO) {
						s = spl6();
						switch (pp->p_stat) {

						case SSLEEP:
							setrun(pp);
							break;

						case SSTOP:
							unsleep(pp);
							break;
						}
						pp->p_flag &= ~STIMO;
						splx(s);
					} else
						psignal(pp, SIGALRM);
			/* bump sleep time for blocked processes, saturating at 127 */
			if(pp->p_stat==SSLEEP||pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			/* exponential decay (ccpu) of %cpu for loaded processes */
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)HZ);
			pp->p_cpticks = 0;
			/*
			 * Decay p_cpu: SCHMAG expands to 9/10, so by
			 * precedence this is (p_cpu*9)/10.  Clamp to
			 * 0..255 and recompute the scheduling priority.
			 */
			a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
			if(a < 0)
				a = 0;
			if(a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			s = spl6();
			/*
			 * If the new priority moved a loaded runnable
			 * process (other than the current one), requeue
			 * it at its user priority.
			 */
			if(pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}
		vmmeter();
		/* "jab the scheduler": wake anyone sleeping on runin */
		if(runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}
		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);
		/*
		 * Autonice: a non-root process that has accumulated
		 * more than 10 minutes (600 * HZ ticks) of user cpu
		 * time is quietly niced to NZERO+4.
		 */
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid)
				if (pp->p_nice == NZERO && u.u_vm.vm_utime > 600 * HZ)
					pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * If user-mode pc profiling is enabled (pr_scale), flag
	 * SOWEUPC and post an AST -- NOTE(review): presumably the
	 * profile tick is charged on return to user mode.
	 */
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * timeout is called to arrange that
 * fun(arg) is called in tim/HZ seconds.
 * An entry is sorted into the callout
 * structure. The time in each structure
 * entry is the number of HZ's more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
3019Sbill */ 3029Sbill timeout(fun, arg, tim) 3039Sbill int (*fun)(); 3049Sbill caddr_t arg; 3059Sbill { 3062430Swnj register struct callo *p1, *p2, *p3; 3079Sbill register int t; 3089Sbill int s; 3099Sbill 3109Sbill t = tim; 3119Sbill p1 = &callout[0]; 3129Sbill s = spl7(); 3139Sbill while(p1->c_func != 0 && p1->c_time <= t) { 3149Sbill t -= p1->c_time; 3159Sbill p1++; 3169Sbill } 3179Sbill p1->c_time -= t; 3189Sbill p2 = p1; 3192430Swnj p3 = &callout[NCALL-2]; 3202430Swnj while(p2->c_func != 0) { 3212430Swnj if (p2 >= p3) 322*2442Swnj panic("timeout"); 3239Sbill p2++; 3242430Swnj } 3259Sbill while(p2 >= p1) { 3269Sbill (p2+1)->c_time = p2->c_time; 3279Sbill (p2+1)->c_func = p2->c_func; 3289Sbill (p2+1)->c_arg = p2->c_arg; 3299Sbill p2--; 3309Sbill } 3319Sbill p1->c_time = t; 3329Sbill p1->c_func = fun; 3339Sbill p1->c_arg = arg; 3349Sbill splx(s); 3359Sbill } 336