/*	kern_clock.c	4.14	02/27/81	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"
#include "../h/cpu.h"

#include "dh.h"
#include "dz.h"

#define	SCHMAG	9/10	/* expands in place: p_cpu*SCHMAG is (p_cpu*9)/10 */

/*
 * Hardclock is called straight from
 *	the real time clock interrupt.
 *	We limit the work we do at real clock interrupt time to:
 *		reloading clock
 *		decrementing time to callouts
 *		recording cpu time usage
 *		modifying priority of current process
 *		arranging for soft clock interrupt
 *		kernel pc profiling
 *
 * At softclock interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */

/*ARGSUSED*/
hardclock(pc, ps)
        caddr_t pc;
{
        register struct callout *p1;
        register struct proc *pp;
        register int s, cpstate;

        /*
         * reprime clock
         */
        clkreld();

        /*
         * update callout times
         */
        if(callout[0].c_func == NULL)
                goto out;
        p1 = &callout[0];
        while(p1->c_time<=0 && p1->c_func!=NULL)
                p1++;
        p1->c_time--;
out:

        /*
         * Maintain iostat and per-process cpu statistics
         */
        if (!noproc) {
                s = u.u_procp->p_rssize;
                u.u_vm.vm_idsrss += s;
                if (u.u_procp->p_textp) {
                        register int xrss = u.u_procp->p_textp->x_rssize;

                        s += xrss;
                        u.u_vm.vm_ixrss += xrss;
                }
                if (s > u.u_vm.vm_maxrss)
                        u.u_vm.vm_maxrss = s;
                if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
                        psignal(u.u_procp, SIGXCPU);
                        if (u.u_limit[LIM_CPU] < INFINITY - 5)
                                u.u_limit[LIM_CPU] += 5;
                }
        }
        if (USERMODE(ps)) {
                u.u_vm.vm_utime++;
                if(u.u_procp->p_nice > NZERO)
                        cpstate = CP_NICE;
                else
                        cpstate = CP_USER;
        } else {
                cpstate = CP_SYS;
                if (noproc)
                        cpstate = CP_IDLE;
                else
                        u.u_vm.vm_stime++;
        }
        cp_time[cpstate]++;
        for (s = 0; s < DK_NDRIVE; s++)
                if (dk_busy&(1<<s))
                        dk_time[s]++;
        if (!noproc) {
                pp = u.u_procp;
                pp->p_cpticks++;
                if(++pp->p_cpu == 0)
                        pp->p_cpu--;
                if(pp->p_cpu % 16 == 0) {
                        (void) setpri(pp);
                        if (pp->p_pri >= PUSER)
                                pp->p_pri = pp->p_usrpri;
                }
        }
        ++lbolt;
#if VAX780
        if (cpu == VAX_780 && !BASEPRI(ps))
                unhang();
#endif
        setsoftclock();
}

/*
 * Constant for decay filter for cpu usage.
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */
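
/*
 * The decay filter in softclock below computes, once per second at the
 * lightning bolt,
 *
 *	p_pctcpu = ccpu * p_pctcpu + (1 - ccpu) * (p_cpticks / hz)
 *
 * With ccpu = exp(-1/20), a process that stops running sees its %cpu
 * shrink by a factor of exp(-1/20) each second, i.e. to roughly 1/e
 * (37%) of its old value after 20 seconds, while a process running
 * flat out converges toward 1.0.
 */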

/*
 * Software clock interrupt.
 * This routine is blocked by spl1(),
 * which doesn't block device interrupts!
 */
/*ARGSUSED*/
softclock(pc, ps)
        caddr_t pc;
{
        register struct callout *p1, *p2;
        register struct proc *pp;
        register int a, s;

        /*
         * callout
         */
        if(callout[0].c_time <= 0) {
                p1 = &callout[0];
                while(p1->c_func != 0 && p1->c_time <= 0) {
                        (*p1->c_func)(p1->c_arg);
                        p1++;
                }
                p2 = &callout[0];
                while(p2->c_func = p1->c_func) {
                        p2->c_time = p1->c_time;
                        p2->c_arg = p1->c_arg;
                        p1++;
                        p2++;
                }
        }

        /*
         * Drain silos.
         */
#if NDH > 0
        s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
        s = spl5(); dztimer(); splx(s);
#endif

        /*
         * If idling and processes are waiting to swap in,
         * check on them.
         */
        if (noproc && runin) {
                runin = 0;
                wakeup((caddr_t)&runin);
        }

        /*
         * Run paging daemon and reschedule every 1/4 sec.
         */
        if (lbolt % (hz/4) == 0) {
                vmpago();
                runrun++;
                aston();
        }

        /*
         * Lightning bolt every second:
         *	sleep timeouts
         *	process priority recomputation
         *	process %cpu averaging
         *	virtual memory metering
         *	kick swapper if processes want in
         */
        if (lbolt >= hz) {
                if (BASEPRI(ps))
                        return;
                lbolt -= hz;
                ++time;
                wakeup((caddr_t)&lbolt);
                for(pp = proc; pp < procNPROC; pp++)
                if (pp->p_stat && pp->p_stat!=SZOMB) {
                        if(pp->p_time != 127)
                                pp->p_time++;
                        if(pp->p_clktim)
                                if(--pp->p_clktim == 0)
                                        if (pp->p_flag & STIMO) {
                                                s = spl6();
                                                switch (pp->p_stat) {

                                                case SSLEEP:
                                                        setrun(pp);
                                                        break;

                                                case SSTOP:
                                                        unsleep(pp);
                                                        break;
                                                }
                                                pp->p_flag &= ~STIMO;
                                                splx(s);
                                        } else
                                                psignal(pp, SIGALRM);
                        if(pp->p_stat==SSLEEP||pp->p_stat==SSTOP)
                                if (pp->p_slptime != 127)
                                        pp->p_slptime++;
                        if (pp->p_flag&SLOAD)
                                pp->p_pctcpu = ccpu * pp->p_pctcpu +
                                    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
                        pp->p_cpticks = 0;
                        a = (pp->p_cpu & 0377)*SCHMAG + pp->p_nice - NZERO;
                        if(a < 0)
                                a = 0;
                        if(a > 255)
                                a = 255;
                        pp->p_cpu = a;
                        (void) setpri(pp);
                        s = spl6();
                        if(pp->p_pri >= PUSER) {
                                if ((pp != u.u_procp || noproc) &&
                                    pp->p_stat == SRUN &&
                                    (pp->p_flag & SLOAD) &&
                                    pp->p_pri != pp->p_usrpri) {
                                        remrq(pp);
                                        pp->p_pri = pp->p_usrpri;
                                        setrq(pp);
                                } else
                                        pp->p_pri = pp->p_usrpri;
                        }
                        splx(s);
                }
                vmmeter();
                if(runin!=0) {
                        runin = 0;
                        wakeup((caddr_t)&runin);
                }
                /*
                 * If there are pages that have been cleaned,
                 * jolt the pageout daemon to process them.
                 * We do this here so that these pages will be
                 * freed if there is an abundance of memory and the
                 * daemon would not be awakened otherwise.
                 */
                if (bclnlist != NULL)
                        wakeup((caddr_t)&proc[2]);
                if (USERMODE(ps)) {
                        pp = u.u_procp;
                        if (pp->p_uid)
                                if (pp->p_nice == NZERO && u.u_vm.vm_utime > 600 * hz)
                                        pp->p_nice = NZERO+4;
                        (void) setpri(pp);
                        pp->p_pri = pp->p_usrpri;
                }
        }
        if (USERMODE(ps) && u.u_prof.pr_scale) {
                u.u_procp->p_flag |= SOWEUPC;
                aston();
        }
}
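
/*
 * A worked example of the delta encoding described before timeout()
 * below: three pending callouts due in 5, 8 and 8 ticks are stored
 * with c_time values 5, 3 and 0.  Hardclock then only has to decrement
 * the first positive entry each tick, and the drain loop in softclock
 * above runs every leading entry whose delta has reached zero.
 */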

/*
 * timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is sorted into the callout
 * structure.  The time in each structure
 * entry is the number of clock ticks by which
 * it expires later than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
timeout(fun, arg, tim)
        int (*fun)();
        caddr_t arg;
{
        register struct callout *p1, *p2, *p3;
        register int t;
        int s;

        t = tim;
        p1 = &callout[0];
        s = spl7();
        while(p1->c_func != 0 && p1->c_time <= t) {
                t -= p1->c_time;
                p1++;
        }
        p1->c_time -= t;
        p2 = p1;
        p3 = callout+(ncallout-2);
        while(p2->c_func != 0) {
                if (p2 >= p3)
                        panic("timeout");
                p2++;
        }
        while(p2 >= p1) {
                (p2+1)->c_time = p2->c_time;
                (p2+1)->c_func = p2->c_func;
                (p2+1)->c_arg = p2->c_arg;
                p2--;
        }
        p1->c_time = t;
        p1->c_func = fun;
        p1->c_arg = arg;
        splx(s);
}
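
/*
 * Usage sketch (hypothetical handler and argument, for illustration
 * only): a driver that wants mypoll(dev) run once a second would prime
 * the callout with
 *
 *	timeout(mypoll, (caddr_t)dev, hz);
 *
 * and have mypoll() issue the same timeout() call again before
 * returning, since each entry fires exactly once at softclock time.
 */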