/*	kern_clock.c	4.37	82/09/04	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/kernel.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/mtpr.h"
#include "../h/cpu.h"
#include "../h/protosw.h"
#include "../h/socket.h"
#include "../net/if.h"
#ifdef MUSH
#include "../h/quota.h"
#include "../h/share.h"
#endif

#include "bk.h"
#include "dh.h"
#include "dz.h"
#include "ps.h"

/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying priority of current process
 *	arranging for soft clock interrupt
 *	kernel pc profiling
 *
 * At software (softclock) interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */
#ifdef GPROF
extern	int profiling;
extern	char *s_lowpc;
extern	u_long s_textsize;
extern	u_short *kcount;
#endif

/*
 * Protoslow is like lbolt, but for slow protocol timeouts, counting
 * up to (hz/PR_SLOWHZ), then causing a pfslowtimo().
 * Protofast is like lbolt, but for fast protocol timeouts, counting
 * up to (hz/PR_FASTHZ), then causing a pffasttimo().
 */
int	protoslow;
int	protofast;
int	ifnetslow;
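/*
 * For example (illustrative values only): with hz = 100 and the
 * usual PR_SLOWHZ = 2 and PR_FASTHZ = 5, protoslow reloads to
 * 100/2 = 50 ticks and protofast to 100/5 = 20 ticks, so softclock
 * ends up calling pfslowtimo() twice and pffasttimo() five times
 * per second.
 */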
/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int s, cpstate;
	extern double avenrun[];

	/*
	 * reprime clock
	 */

#if NPS > 0
	/*
	 * sync refresh of picture system
	 */
	psextsync(pc, ps);
#endif

	/*
	 * update callout times
	 */
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		;
	if (p1)
		p1->c_time--;

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_ru.ru_idrss += s;
		u.u_ru.ru_isrss += 0;		/* XXX */
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_ru.ru_ixrss += xrss;
		}
		if (s > u.u_ru.ru_maxrss)
			u.u_ru.ru_maxrss = s;
		if ((u.u_ru.ru_utime.tv_sec+u.u_ru.ru_stime.tv_sec+1) >
		    u.u_rlimit[RLIMIT_CPU].rlim_cur) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_rlimit[RLIMIT_CPU].rlim_cur <
			    u.u_rlimit[RLIMIT_CPU].rlim_max)
				u.u_rlimit[RLIMIT_CPU].rlim_cur += 5;
		}
	}
	/*
	 * Update iostat information.
	 */
	if (USERMODE(ps)) {
		u.u_ru.ru_utime.tv_usec += 1000000/hz;
		if (u.u_ru.ru_utime.tv_usec > 1000000) {
			u.u_ru.ru_utime.tv_sec++;
			u.u_ru.ru_utime.tv_usec -= 1000000;
		}
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
#ifdef GPROF
		int k = pc - s_lowpc;
		if (profiling < 2 && k < s_textsize)
			kcount[k / sizeof (*kcount)]++;
#endif
		cpstate = CP_SYS;
		if (noproc) {
			if ((ps&PSL_IPL) != 0)
				cpstate = CP_IDLE;
		} else {
			u.u_ru.ru_stime.tv_usec += 1000000/hz;
			if (u.u_ru.ru_stime.tv_usec > 1000000) {
				u.u_ru.ru_stime.tv_sec++;
				u.u_ru.ru_stime.tv_usec -= 1000000;
			}
		}
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	/*
	 * Adjust priority of current process.
	 */
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		if (++pp->p_cpu == 0)
			pp->p_cpu--;
#ifdef MUSH
		pp->p_quota->q_cost += (pp->p_nice > NZERO ?
		    (shconsts.sc_tic * ((2*NZERO)-pp->p_nice)) / NZERO :
		    shconsts.sc_tic) * (((int)avenrun[0]+2)/3);
#endif
		if (pp->p_cpu % 4 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * Time moves on.
	 */
	++lbolt;

	/*
	 * Time moves on for protocols.
	 */
	--protoslow; --protofast; --ifnetslow;

#if VAX780
	/*
	 * On 780's, implement a fast UBA watcher,
	 * to make sure uba's don't get stuck.
	 */
	if (cpu == VAX_780 && panicstr == 0 && !BASEPRI(ps))
		unhang();
#endif
	/*
	 * Schedule a software interrupt for the rest
	 * of clock activities.
	 */
	setsoftclock();
}
/*
 * The digital decay cpu usage priority assignment is scaled to run in
 * time as expanded by the 1 minute load average.  Each second we
 * multiply the previous cpu usage estimate by
 *		nrscale*avenrun[0]
 * The following relates the load average to the period over which
 * cpu usage is 90% forgotten:
 *	loadav 1	 5 seconds
 *	loadav 5	24 seconds
 *	loadav 10	47 seconds
 *	loadav 20	93 seconds
 * This is a great improvement on the previous algorithm which
 * decayed the priorities by a constant, and decayed away all knowledge
 * of previous activity in about 20 seconds.  Under heavy load,
 * the previous algorithm degenerated to round-robin with poor response
 * time when there was a high load average.
 */
#undef ave
#define	ave(a,b) ((int)(((int)(a*b))/(b+1)))
int	nrscale = 2;
double	avenrun[];
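/*
 * Worked example (illustrative, not part of the kernel proper):
 * the once-a-second code in softclock() below recomputes p_cpu as
 * ave(p_cpu, b) with b = nrscale*avenrun[0], i.e. scales it by
 * b/(b+1).  At a load average of 1, b = 2, so five iterations leave
 * (2/3)^5, about 13%, of the estimate -- the "5 seconds" entry in
 * the table above.
 */
#ifdef notdef
decaydemo()
{
	register int cpu = 255;		/* worst-case usage estimate */
	register int t;

	for (t = 0; t < 5; t++)
		cpu = ave(cpu, 2);	/* loadav 1, so b = 2*1 */
	/* cpu is now 33, roughly 13% of its starting value */
}
#endif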
/*
 * Constant for decay filter for cpu usage field
 * in process table (used by ps au).
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */
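/*
 * Since ccpu = exp(-1/20), a process accumulating no new ticks sees
 * p_pctcpu scaled by ccpu once a second, dropping to 1/e (about 37%)
 * of its value after 20 seconds and to about 8% after 50; in effect
 * %cpu is a moving average with a 20 second time constant.
 */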
#ifdef MELB
/*
 * Automatic niceness rate & max constants
 */
#define	MAXNICE	(8 + NZERO)	/* maximum auto nice value */
#define	NFACT	(40 * hz)	/* nice++ every 40 secs cpu+sys time */
#endif

/*
 * Software clock interrupt.
 * This routine runs at lower priority than device interrupts.
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int a, s;
	caddr_t arg;
	int (*func)();

	/*
	 * Perform callouts (but not after panics!)
	 */
	if (panicstr == 0) {
		for (;;) {
			s = spl7();
			if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
				splx(s);
				break;
			}
			calltodo.c_next = p1->c_next;
			arg = p1->c_arg;
			func = p1->c_func;
			p1->c_next = callfree;
			callfree = p1;
			(void) splx(s);
			(*func)(arg);
		}
	}

	/*
	 * Drain silos.
	 */
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon every 1/4 sec.
	 */
	if (lbolt % (hz/4) == 0) {
		vmpago();
	}

	/*
	 * Reschedule every 1/10 sec.
	 */
	if (lbolt % (hz/10) == 0) {
		runrun++;
		aston();
	}

	/*
	 * Run network slow and fast timeouts.
	 */
	if (protofast <= 0) {
		protofast = hz / PR_FASTHZ;
		pffasttimo();
	}
	if (protoslow <= 0) {
		protoslow = hz / PR_SLOWHZ;
		pfslowtimo();
	}
	if (ifnetslow <= 0) {
		ifnetslow = hz / IFNET_SLOWHZ;
		if_slowtimo();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= hz) {
		/*
		 * This doesn't mean much on VAX since we run at
		 * software interrupt time... if hardclock()
		 * calls softclock() directly, it prevents
		 * this code from running when the priority
		 * was raised when the clock interrupt occurred.
		 */
		if (BASEPRI(ps))
			return;

		/*
		 * If we didn't run a few times because of
		 * long blockage at high ipl, we don't
		 * really want to run this code several times,
		 * so squish out all multiples of hz here.
		 */
		s = spl6();
		time.tv_sec += lbolt / hz; lbolt %= hz;
		splx(s);

		/*
		 * Wakeup lightning bolt sleepers.
		 * Processes sleep on lbolt to wait
		 * for short amounts of time (e.g. 1 second).
		 */
		wakeup((caddr_t)&lbolt);

		/*
		 * Recompute process priority and process
		 * sleep() system calls as well as internal
		 * sleeps with timeouts (tsleep() kernel routine).
		 */
		for (pp = proc; pp < procNPROC; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
#ifdef MUSH
			/*
			 * Charge process for memory in use
			 */
			if (pp->p_quota->q_uid)
				pp->p_quota->q_cost +=
				    shconsts.sc_click * pp->p_rssize;
#endif
			/*
			 * Increase resident time, to max of 127 seconds
			 * (it is kept in a character.)  For
			 * loaded processes this is time in core; for
			 * swapped processes, this is time on drum.
			 */
			if (pp->p_time != 127)
				pp->p_time++;
			/*
			 * Time processes out of select.
			 */
			if (timerisset(&pp->p_seltimer) &&
			    --pp->p_seltimer.tv_sec <= 0) {
				timerclear(&pp->p_seltimer);
				s = spl6();
				switch (pp->p_stat) {

				case SSLEEP:
					setrun(pp);
					break;

				case SSTOP:
					unsleep(pp);
					break;
				}
				splx(s);
			}
			if (timerisset(&pp->p_realtimer.itimer_value) &&
			    itimerdecr(&pp->p_realtimer, 1000000) == 0)
				psignal(pp, SIGALRM);

			/*
			 * If process is blocked, increment computed
			 * time blocked.  This is used in swap scheduling.
			 */
			if (pp->p_stat==SSLEEP || pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			/*
			 * Update digital filter estimation of process
			 * cpu utilization for loaded processes.
			 */
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
			/*
			 * Recompute process priority.  The number p_cpu
			 * is a weighted estimate of cpu time consumed.
			 * A process which consumes cpu time has this
			 * increase regularly.  We here decrease it by
			 * a fraction based on load average giving a digital
			 * decay filter which damps out in about 5 seconds
			 * when seconds are measured in time expanded by the
			 * load average.
			 *
			 * If a process is niced, then the nice directly
			 * affects the new priority.  The final priority
			 * is in the range 0 to 255, to fit in a character.
			 */
			pp->p_cpticks = 0;
#ifdef MUSH
			a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
			    pp->p_nice - NZERO + pp->p_quota->q_nice;
#else
			a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
			    pp->p_nice - NZERO;
#endif
			if (a < 0)
				a = 0;
			if (a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			/*
			 * Now have computed new process priority
			 * in p->p_usrpri.  Carefully change p->p_pri.
			 * A process is on a run queue associated with
			 * this priority, so we must block out process
			 * state changes during the transition.
			 */
			s = spl6();
			if (pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}

		/*
		 * Perform virtual memory metering.
		 */
		vmmeter();

		/*
		 * If the swap process is trying to bring
		 * a process in, have it look again to see
		 * if it is possible now.
		 */
		if (runin!=0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}

		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);

#ifdef MELB
		/*
		 * If a process was running, see if it is time
		 * to make it nicer.
		 */
		if (!noproc) {
			pp = u.u_procp;
			if (pp->p_uid
#ifdef MUSH
			    && !(pp->p_flag & SLOGIN)
#else
				/* this is definitely not good enough */
			    && (pp->p_pid != pp->p_pgrp || pp->p_ppid != 1)
#endif
			    && (u.u_ru.ru_utime + u.u_ru.ru_stime) >
				(pp->p_nice-NZERO+1)*NFACT
			    && pp->p_nice >= NZERO
			    && pp->p_nice < MAXNICE) {
				pp->p_nice++;
				(void) setpri(pp);
				pp->p_pri = pp->p_usrpri;
			}
		}
#else
		/*
		 * If the trap occurred from usermode,
		 * then check to see if it has now been
		 * running more than 10 minutes of user time
		 * and should thus run with reduced priority
		 * to give other processes a chance.
		 */
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid && pp->p_nice == NZERO &&
			    u.u_ru.ru_utime.tv_sec > 600)
				pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
#endif
	}
	/*
	 * If trapped user-mode, give it a profiling tick.
	 */
	if (USERMODE(ps) &&
	    timerisset(&u.u_timer[ITIMER_VIRTUAL].itimer_value) &&
	    itimerdecr(&u.u_timer[ITIMER_VIRTUAL].itimer_value, 1000000/hz) == 0)
		psignal(u.u_procp, SIGPROF);
}
/*
 * Timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is linked into the callout
 * structure.  The time in each structure
 * entry is the number of hz's more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}
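/*
 * Worked example (illustrative): with timeouts pending in 5, 8 and
 * 12 ticks, the calltodo list holds differential c_time values
 * 5, 3, 4, so hardclock ages every entry by decrementing only the
 * first one with time remaining.  A hypothetical caller
 *
 *	timeout(func, (caddr_t)arg, 2*hz);
 *
 * thus schedules func(arg) to run roughly two seconds from now.
 */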
#ifdef notdef
/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = spl7();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
			if (p2->c_next)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}
#endif