/*	kern_clock.c	4.35	82/07/22	*/

#include "../h/param.h"
#include "../h/systm.h"
#include "../h/dk.h"
#include "../h/callout.h"
#include "../h/seg.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/reg.h"
#include "../h/psl.h"
#include "../h/vm.h"
#include "../h/buf.h"
#include "../h/text.h"
#include "../h/vlimit.h"
#include "../h/mtpr.h"
#include "../h/clock.h"
#include "../h/cpu.h"
#include "../h/protosw.h"
#include "../h/socket.h"
#include "../net/if.h"
#ifdef MUSH
#include "../h/quota.h"
#include "../h/share.h"
#endif

#include "bk.h"
#include "dh.h"
#include "dz.h"
#include "ps.h"

/*
 * Hardclock is called straight from
 * the real time clock interrupt.
 * We limit the work we do at real clock interrupt time to:
 *	reloading clock
 *	decrementing time to callouts
 *	recording cpu time usage
 *	modifying priority of current process
 *	arranging for soft clock interrupt
 *	kernel pc profiling
 *
 * At software (softclock) interrupt time we:
 *	implement callouts
 *	maintain date
 *	lightning bolt wakeup (every second)
 *	alarm clock signals
 *	jab the scheduler
 *
 * On the vax softclock interrupts are implemented by
 * software interrupts.  Note that we may have multiple softclock
 * interrupts compressed into one (due to excessive interrupt load),
 * but that hardclock interrupts should never be lost.
 */
#ifdef GPROF
extern	int profiling;
extern	char *s_lowpc;
extern	u_long s_textsize;
extern	u_short *kcount;
#endif

/*
 * Protoslow is like lbolt, but for slow protocol timeouts, counting
 * down from (hz/PR_SLOWHZ), then causing a pfslowtimo().
 * Protofast is like lbolt, but for fast protocol timeouts, counting
 * down from (hz/PR_FASTHZ), then causing a pffasttimo().
 */
int	protoslow;
int	protofast;
int	ifnetslow;
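/*
 * Illustrative note (added; not in the original source): each counter
 * is reset in softclock() and decremented once per hardclock() tick,
 * so its timeout routine runs every hz/PR_xxxHZ ticks.  Assuming the
 * customary hz of 100 and a PR_SLOWHZ of 2, pfslowtimo() would run
 * every 50 ticks, i.e. twice a second.
 */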

/*ARGSUSED*/
hardclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int s, cpstate;
	extern double avenrun[];

	/*
	 * reprime clock
	 */
	clkreld();

#if NPS > 0
	/*
	 * sync refresh of picture system
	 */
	psextsync(pc, ps);
#endif

	/*
	 * update callout times
	 */
	for (p1 = calltodo.c_next; p1 && p1->c_time <= 0; p1 = p1->c_next)
		;
	if (p1)
		p1->c_time--;

	/*
	 * Maintain iostat and per-process cpu statistics
	 */
	if (!noproc) {
		s = u.u_procp->p_rssize;
		u.u_vm.vm_idsrss += s;
		if (u.u_procp->p_textp) {
			register int xrss = u.u_procp->p_textp->x_rssize;

			s += xrss;
			u.u_vm.vm_ixrss += xrss;
		}
		if (s > u.u_vm.vm_maxrss)
			u.u_vm.vm_maxrss = s;
		if ((u.u_vm.vm_utime+u.u_vm.vm_stime+1)/hz > u.u_limit[LIM_CPU]) {
			psignal(u.u_procp, SIGXCPU);
			if (u.u_limit[LIM_CPU] < INFINITY - 5)
				u.u_limit[LIM_CPU] += 5;
		}
	}
	/*
	 * Update iostat information.
	 */
	if (USERMODE(ps)) {
		u.u_vm.vm_utime++;
		if (u.u_procp->p_nice > NZERO)
			cpstate = CP_NICE;
		else
			cpstate = CP_USER;
	} else {
#ifdef GPROF
		int k = pc - s_lowpc;
		if (profiling < 2 && k < s_textsize)
			kcount[k / sizeof (*kcount)]++;
#endif
		cpstate = CP_SYS;
		if (noproc) {
			if ((ps&PSL_IPL) != 0)
				cpstate = CP_IDLE;
		} else
			u.u_vm.vm_stime++;
	}
	cp_time[cpstate]++;
	for (s = 0; s < DK_NDRIVE; s++)
		if (dk_busy&(1<<s))
			dk_time[s]++;
	/*
	 * Adjust priority of current process.
	 */
	if (!noproc) {
		pp = u.u_procp;
		pp->p_cpticks++;
		if (++pp->p_cpu == 0)
			pp->p_cpu--;
#ifdef MUSH
		pp->p_quota->q_cost += (pp->p_nice > NZERO ?
		    (shconsts.sc_tic * ((2*NZERO)-pp->p_nice)) / NZERO :
		    shconsts.sc_tic) * (((int)avenrun[0]+2)/3);
#endif
		if (pp->p_cpu % 4 == 0) {
			(void) setpri(pp);
			if (pp->p_pri >= PUSER)
				pp->p_pri = pp->p_usrpri;
		}
	}
	/*
	 * Time moves on.
	 */
	++lbolt;

	/*
	 * Time moves on for protocols.
	 */
	--protoslow; --protofast; --ifnetslow;

#if VAX780
	/*
	 * On 780's, implement a fast UBA watcher,
	 * to make sure uba's don't get stuck.
	 */
	if (cpu == VAX_780 && panicstr == 0 && !BASEPRI(ps))
		unhang();
#endif
	/*
	 * Schedule a software interrupt for the rest
	 * of clock activities.
	 */
	setsoftclock();
}

/*
 * The digital decay cpu usage priority assignment is scaled to run in
 * time as expanded by the 1 minute load average.  Each second we
 * multiply the previous cpu usage estimate by
 *		nrscale*avenrun[0]
 * The following relates the load average to the period over which
 * cpu usage is 90% forgotten:
 *	loadav 1	 5 seconds
 *	loadav 5	24 seconds
 *	loadav 10	47 seconds
 *	loadav 20	93 seconds
 * This is a great improvement on the previous algorithm which
 * decayed the priorities by a constant, and decayed away all knowledge
 * of previous activity in about 20 seconds.  Under heavy load,
 * the previous algorithm degenerated to round-robin with poor response
 * time when there was a high load average.
 */
#undef ave
#define	ave(a,b) ((int)(((int)(a*b))/(b+1)))
int	nrscale = 2;
double	avenrun[];
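/*
 * Worked example (added for illustration): with ave() as defined
 * above, each second the usage estimate is actually scaled by
 * b/(b+1), where b = nrscale*avenrun[0].  The time for 90% of the
 * history to decay is log(.1)/log(b/(b+1)) seconds: for loadav 1,
 * b == 2 and log(.1)/log(2/3) ~= 5.7; for loadav 5, b == 10 and
 * log(.1)/log(10/11) ~= 24.2, matching the table above.
 */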

/*
 * Constant for decay filter for cpu usage field
 * in process table (used by ps au).
 */
double	ccpu = 0.95122942450071400909;		/* exp(-1/20) */

#ifdef MELB
/*
 * Automatic niceness rate & max constants
 */
#define	MAXNICE	(8 + NZERO)	/* maximum auto nice value */
#define	NFACT	(40 * hz)	/* nice++ every 40 secs cpu+sys time */
#endif

/*
 * Software clock interrupt.
 * This routine runs at lower priority than device interrupts.
 */
/*ARGSUSED*/
softclock(pc, ps)
	caddr_t pc;
{
	register struct callout *p1;
	register struct proc *pp;
	register int a, s;
	caddr_t arg;
	int (*func)();

	/*
	 * Perform callouts (but not after panics!)
	 */
	if (panicstr == 0) {
		for (;;) {
			s = spl7();
			if ((p1 = calltodo.c_next) == 0 || p1->c_time > 0) {
				splx(s);
				break;
			}
			calltodo.c_next = p1->c_next;
			arg = p1->c_arg;
			func = p1->c_func;
			p1->c_next = callfree;
			callfree = p1;
			(void) splx(s);
			(*func)(arg);
		}
	}
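	/*
	 * Descriptive note (added): the loop above raises the priority
	 * to spl7 only long enough to unlink each expired entry and
	 * return it to the free list; the handler itself runs with the
	 * priority dropped again, so a long-running callout cannot
	 * lock out hardclock().
	 */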

	/*
	 * Drain silos.
	 */
#if NDH > 0
	s = spl5(); dhtimer(); splx(s);
#endif
#if NDZ > 0
	s = spl5(); dztimer(); splx(s);
#endif

	/*
	 * If idling and processes are waiting to swap in,
	 * check on them.
	 */
	if (noproc && runin) {
		runin = 0;
		wakeup((caddr_t)&runin);
	}

	/*
	 * Run paging daemon every 1/4 sec.
	 */
	if (lbolt % (hz/4) == 0) {
		vmpago();
	}

	/*
	 * Reschedule every 1/10 sec.
	 */
	if (lbolt % (hz/10) == 0) {
		runrun++;
		aston();
	}

	/*
	 * Run network slow and fast timeouts.
	 */
	if (protofast <= 0) {
		protofast = hz / PR_FASTHZ;
		pffasttimo();
	}
	if (protoslow <= 0) {
		protoslow = hz / PR_SLOWHZ;
		pfslowtimo();
	}
	if (ifnetslow <= 0) {
		ifnetslow = hz / IFNET_SLOWHZ;
		if_slowtimo();
	}

	/*
	 * Lightning bolt every second:
	 *	sleep timeouts
	 *	process priority recomputation
	 *	process %cpu averaging
	 *	virtual memory metering
	 *	kick swapper if processes want in
	 */
	if (lbolt >= hz) {
		/*
		 * This doesn't mean much on VAX since we run at
		 * software interrupt time... if hardclock()
		 * calls softclock() directly, it prevents
		 * this code from running when the priority
		 * was raised when the clock interrupt occurred.
		 */
		if (BASEPRI(ps))
			return;

		/*
		 * If we didn't run a few times because of
		 * long blockage at high ipl, we don't
		 * really want to run this code several times,
		 * so squish out all multiples of hz here.
		 */
		s = spl6();
		time += lbolt / hz; lbolt %= hz;
		splx(s);

		/*
		 * Wakeup lightning bolt sleepers.
		 * Processes sleep on lbolt to wait
		 * for short amounts of time (e.g. 1 second).
		 */
		wakeup((caddr_t)&lbolt);

		/*
		 * Recompute process priority and process
		 * sleep() system calls as well as internal
		 * sleeps with timeouts (tsleep() kernel routine).
		 */
		for (pp = proc; pp < procNPROC; pp++)
		if (pp->p_stat && pp->p_stat!=SZOMB) {
#ifdef MUSH
			/*
			 * Charge process for memory in use
			 */
			if (pp->p_quota->q_uid)
				pp->p_quota->q_cost +=
				    shconsts.sc_click * pp->p_rssize;
#endif
			/*
			 * Increase resident time, to max of 127 seconds
			 * (it is kept in a character.)  For
			 * loaded processes this is time in core; for
			 * swapped processes, this is time on drum.
			 */
			if (pp->p_time != 127)
				pp->p_time++;
			/*
			 * If process has clock counting down, and it
			 * expires, set it running (if this is a tsleep()),
			 * or give it a SIGALRM (if the user process
			 * is using alarm signals).
			 */
			if (pp->p_clktim && --pp->p_clktim == 0)
				if (pp->p_flag & STIMO) {
					s = spl6();
					switch (pp->p_stat) {

					case SSLEEP:
						setrun(pp);
						break;

					case SSTOP:
						unsleep(pp);
						break;
					}
					pp->p_flag &= ~STIMO;
					splx(s);
				} else
					psignal(pp, SIGALRM);
			/*
			 * If process is blocked, increment computed
			 * time blocked.  This is used in swap scheduling.
			 */
			if (pp->p_stat==SSLEEP || pp->p_stat==SSTOP)
				if (pp->p_slptime != 127)
					pp->p_slptime++;
			/*
			 * Update digital filter estimation of process
			 * cpu utilization for loaded processes.
			 */
			if (pp->p_flag&SLOAD)
				pp->p_pctcpu = ccpu * pp->p_pctcpu +
				    (1.0 - ccpu) * (pp->p_cpticks/(float)hz);
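			/*
			 * Worked equation (added for illustration):
			 * with ccpu = exp(-1/20), a burst of activity
			 * retains exp(-t/20) of its weight after t
			 * seconds, so 90% of it has decayed after
			 * -20*log(.1) ~= 46 seconds.
			 */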
			/*
			 * Recompute process priority.  The number p_cpu
			 * is a weighted estimate of cpu time consumed.
			 * A process which consumes cpu time has this
			 * increase regularly.  We here decrease it by
			 * a fraction based on load average giving a digital
			 * decay filter which damps out in about 5 seconds
			 * when seconds are measured in time expanded by the
			 * load average.
			 *
			 * If a process is niced, then the nice directly
			 * affects the new priority.  The final priority
			 * is in the range 0 to 255, to fit in a character.
			 */
			pp->p_cpticks = 0;
#ifdef MUSH
			a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
			    pp->p_nice - NZERO + pp->p_quota->q_nice;
#else
			a = ave((pp->p_cpu & 0377), avenrun[0]*nrscale) +
			    pp->p_nice - NZERO;
#endif
			if (a < 0)
				a = 0;
			if (a > 255)
				a = 255;
			pp->p_cpu = a;
			(void) setpri(pp);
			/*
			 * Now have computed new process priority
			 * in p->p_usrpri.  Carefully change p->p_pri.
			 * A process is on a run queue associated with
			 * this priority, so we must block out process
			 * state changes during the transition.
			 */
			s = spl6();
			if (pp->p_pri >= PUSER) {
				if ((pp != u.u_procp || noproc) &&
				    pp->p_stat == SRUN &&
				    (pp->p_flag & SLOAD) &&
				    pp->p_pri != pp->p_usrpri) {
					remrq(pp);
					pp->p_pri = pp->p_usrpri;
					setrq(pp);
				} else
					pp->p_pri = pp->p_usrpri;
			}
			splx(s);
		}
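		/*
		 * Design note (added): the remrq()/setrq() pair above is
		 * needed because the run queues are indexed by p_pri; a
		 * runnable, loaded process whose priority changed without
		 * being requeued would sit on the wrong queue, which is
		 * why the transition is made at spl6.
		 */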

		/*
		 * Perform virtual memory metering.
		 */
		vmmeter();

		/*
		 * If the swap process is trying to bring
		 * a process in, have it look again to see
		 * if it is possible now.
		 */
		if (runin != 0) {
			runin = 0;
			wakeup((caddr_t)&runin);
		}

		/*
		 * If there are pages that have been cleaned,
		 * jolt the pageout daemon to process them.
		 * We do this here so that these pages will be
		 * freed if there is an abundance of memory and the
		 * daemon would not be awakened otherwise.
		 */
		if (bclnlist != NULL)
			wakeup((caddr_t)&proc[2]);

#ifdef MELB
		/*
		 * If a process was running, see if time to make it nicer
		 */
		if (!noproc) {
			pp = u.u_procp;
			if (pp->p_uid
#ifdef MUSH
				&& !(pp->p_flag & SLOGIN)
#else
				/* this is definitely not good enough */
				&& (pp->p_pid != pp->p_pgrp || pp->p_ppid != 1)
#endif
				&& (u.u_vm.vm_utime + u.u_vm.vm_stime) >
					(pp->p_nice-NZERO+1)*NFACT
				&& pp->p_nice >= NZERO
				&& pp->p_nice < MAXNICE
			) {
				pp->p_nice++;
				(void) setpri(pp);
				pp->p_pri = pp->p_usrpri;
			}
		}
#else
		/*
		 * If the trap occurred from usermode,
		 * then check to see if it has now been
		 * running more than 10 minutes of user time
		 * and should thus run with reduced priority
		 * to give other processes a chance.
		 */
		if (USERMODE(ps)) {
			pp = u.u_procp;
			if (pp->p_uid && pp->p_nice == NZERO &&
			    u.u_vm.vm_utime > 600 * hz)
				pp->p_nice = NZERO+4;
			(void) setpri(pp);
			pp->p_pri = pp->p_usrpri;
		}
#endif
	}
	/*
	 * If trapped user-mode, give it a profiling tick.
	 */
	if (USERMODE(ps) && u.u_prof.pr_scale) {
		u.u_procp->p_flag |= SOWEUPC;
		aston();
	}
}

/*
 * Timeout is called to arrange that
 * fun(arg) is called in tim/hz seconds.
 * An entry is linked into the callout
 * structure.  The time in each structure
 * entry is the number of hz's more
 * than the previous entry.
 * In this way, decrementing the
 * first entry has the effect of
 * updating all entries.
 *
 * The panic is there because there is nothing
 * intelligent to be done if an entry won't fit.
 */
timeout(fun, arg, tim)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2, *pnew;
	register int t;
	int s;

/* DEBUGGING CODE */
	int ttrstrt();

	if (fun == ttrstrt && arg == 0)
		panic("timeout ttrstr arg");
/* END DEBUGGING CODE */
	t = tim;
	s = spl7();
	pnew = callfree;
	if (pnew == NULL)
		panic("timeout table overflow");
	callfree = pnew->c_next;
	pnew->c_arg = arg;
	pnew->c_func = fun;
	for (p1 = &calltodo; (p2 = p1->c_next) && p2->c_time < t; p1 = p2)
		t -= p2->c_time;
	p1->c_next = pnew;
	pnew->c_next = p2;
	pnew->c_time = t;
	if (p2)
		p2->c_time -= t;
	splx(s);
}
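/*
 * Worked example (added for illustration): if timeouts are pending
 * 10, 14 and 19 ticks from now, the calltodo list holds c_time
 * deltas 10, 4 and 5.  Hardclock() then needs to decrement only the
 * first positive entry per tick to age all three at once.
 */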

/*
 * untimeout is called to remove a function timeout call
 * from the callout structure.
 */
untimeout(fun, arg)
	int (*fun)();
	caddr_t arg;
{
	register struct callout *p1, *p2;
	register int s;

	s = spl7();
	for (p1 = &calltodo; (p2 = p1->c_next) != 0; p1 = p2) {
		if (p2->c_func == fun && p2->c_arg == arg) {
			if (p2->c_next)
				p2->c_next->c_time += p2->c_time;
			p1->c_next = p2->c_next;
			p2->c_next = callfree;
			callfree = p2;
			break;
		}
	}
	splx(s);
}
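/*
 * Usage sketch (illustrative; foostart and dev are hypothetical):
 * a driver that wants foostart(dev) called half a second from now
 * would issue
 *
 *	timeout(foostart, (caddr_t)dev, hz/2);
 *
 * and may cancel the request before it fires with
 *
 *	untimeout(foostart, (caddr_t)dev);
 */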