/*	kern_clock.c	4.10	02/23/81	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callo.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"

#include "dh.h"
#include "dz.h"

#define	SCHMAG	9/10


/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying priority of current process
 *	arrange for soft clock interrupt
 *	kernel pc profiling
 *
 * At softclock interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */

/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callo *p1;
	register struct proc *pp;
	register int s, cpstate;

	/*
	 * reprime clock
	 */
	clkreld();

	/*
	 * update callout times
	 */
	if(callout[0].c_func == NULL)
		goto out;
	p1 = &callout[0];
	while(p1->c_time<=0 && p1->c_func!=NULL)
		p1++;
	p1->c_time--;
out:

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/HZ > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if(u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
		cpstate = CP_SYS;
		if (noproc)
			cpstate = CP_IDLE;
		else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		if(++pp->p_cpu == 0)
			pp->p_cpu--;
		if(pp->p_cpu % 16 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	++lbolt;
#if VAX==780
	if (!BASEPRI(ps))
		unhang();
#endif
	setsoftclock();
}

/*
 * Constant for decay filter for cpu usage.
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */

/*
 * Software clock interrupt.
 * This routine is blocked by spl1(),
 * which doesn't block device interrupts!
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callo *p1, *p2;
	register struct proc *pp;
	register int a, s;

	/*
	 * callout
	 */
	if(callout[0].c_time <= 0) {
		p1 = &callout[0];
		while(p1->c_func != 0 && p1->c_time <= 0) {
			(*p1->c_func)(p1->c_arg);
			p1++;
		}
		p2 = &callout[0];
		while(p2->c_func = p1->c_func) {
			p2->c_time = p1->c_time;
			p2->c_arg = p1->c_arg;
			p1++;
			p2++;
		}
	}

	/*
	 * Drain silos.
	 */
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon and reschedule every 1/4 sec.
	 */
	if (lbolt % (HZ/4) == 0) {
		vmpago();
		runrun++;
		aston();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= HZ) {
		if (BASEPRI(ps))
			return;
		lbolt -= HZ;
		++time;
		wakeup((caddr_t)&lbolt);
		for(pp = &proc[0]; pp < &proc[NPROC]; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
			if(pp->p_time != 127)
				pp->p_time++;
			if(pp->p_clktim)
				if(--pp->p_clktim == 0)
					if (pp->p_flag & STIMO) {
						s = spl6();
						switch (pp->p_stat) {

						case SSLEEP:
							setrun(pp);
							break;

						case SSTOP:
							unsleep(pp);
							break;
						}
						pp->p_flag &= ~STIMO;
						splx(s);
					} else
						psignal(pp, SIGALRM);
			if(pp->p_stat==SSLEEP||pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)HZ);
			pp->p_cpticks = 0;
			a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
			if(a < 0)
				a = 0;
			if(a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			s = spl6();
			if(pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}
		vmmeter();
		if(runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}
		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid)
				if (pp->p_nice == NZERO && u.u_vm.vm_utime > 600 * HZ)
					pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
	}
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * timeout is called to arrange that
 * fun(arg) is called in tim/HZ seconds.
 * An entry is sorted into the callout
 * structure.  The time in each structure
 * entry is the number of HZ's more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
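 *
 * As an illustration of the delta scheme: callouts due in 5, 8 and
 * 12 ticks are stored with c_time values of 5, 3 and 4, so hardclock
 * need only decrement the first entry to age the whole list.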
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callo *p1, *p2, *p3;
	register int t;
	int s;

	t = tim;
	p1 = &callout[0];
	s = spl7();
	while(p1->c_func != 0 && p1->c_time <= t) {
		t -= p1->c_time;
		p1++;
	}
	p1->c_time -= t;
	p2 = p1;
	p3 = &callout[NCALL-2];
	while(p2->c_func != 0) {
		if (p2 >= p3)
			panic("timeout");
		p2++;
	}
	while(p2 >= p1) {
		(p2+1)->c_time = p2->c_time;
		(p2+1)->c_func = p2->c_func;
		(p2+1)->c_arg = p2->c_arg;
		p2--;
	}
	p1->c_time = t;
	p1->c_func = fun;
	p1->c_arg = arg;
	splx(s);
}
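
/*
 * Usage sketch (hypothetical; not part of the original source):
 * a driver that wants its poll routine run again half a second
 * from now would arrange it with
 *
 *	timeout(dpoll, (caddr_t)dp, HZ/2);
 *
 * where dpoll and dp stand for the driver's own routine and argument.
 * hardclock() counts the entry down and softclock() makes the call,
 * at spl1, once the accumulated delta reaches zero.
 */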