/*	$OpenBSD: kern_synch.c,v 1.15 1999/04/21 01:21:48 alex Exp $	*/
/*	$NetBSD: kern_synch.c,v 1.37 1996/04/22 01:38:37 christos Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.6 (Berkeley) 1/21/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/signalvar.h>
#include <sys/resourcevar.h>
#include <vm/vm.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <machine/cpu.h>

u_char	curpriority;		/* usrpri of curproc */
int	lbolt;			/* once a second sleep address */

void roundrobin __P((void *));
void schedcpu __P((void *));
void updatepri __P((struct proc *));
void endtsleep __P((void *));

/*
 * Force switch among equal priority processes every 100ms.
 */
/* ARGSUSED */
void
roundrobin(arg)
	void *arg;
{

	need_resched();
	timeout(roundrobin, NULL, hz / 10);
}

/*
 * Constants for digital decay and forget:
 *	90% of (p_estcpu) usage in 5 * loadav time
 *	95% of (p_pctcpu) usage in 60 seconds (load insensitive)
 *	Note that, as ps(1) mentions, this can let percentages
 *	total over 100% (I've seen 137.9% for 3 processes).
 *
 * Note that hardclock updates p_estcpu and p_cpticks independently.
 *
 * We wish to decay away 90% of p_estcpu in (5 * loadavg) seconds.
 * That is, the system wants to compute a value of decay such
 * that the following for loop:
 *	for (i = 0; i < (5 * loadavg); i++)
 *		p_estcpu *= decay;
 * will compute
 *	p_estcpu *= 0.1;
 * for all values of loadavg:
 *
 * Mathematically this loop can be expressed by saying:
 *	decay ** (5 * loadavg) ~= .1
 *
 * The system computes decay as:
 *	decay = (2 * loadavg) / (2 * loadavg + 1)
 *
 * We wish to prove that the system's computation of decay
 * will always fulfill the equation:
 *	decay ** (5 * loadavg) ~= .1
 *
 * If we compute b as:
 *	b = 2 * loadavg
 * then
 *	decay = b / (b + 1)
 *
 * We now need to prove two things:
 *	1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
 *	2) Given b/(b+1) ** power ~= .1, prove power == (5 * loadavg)
 *
 * Facts:
 *	For x close to zero, exp(x) =~ 1 + x, since
 *		exp(x) = 0! + x**1/1! + x**2/2! + ... .
 *		therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
 *	For x close to zero, ln(1+x) =~ x, since
 *		ln(1+x) = x - x**2/2 + x**3/3 - ...	-1 < x < 1
 *		therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
 *	ln(.1) =~ -2.30
 *
 * Proof of (1):
 *    Solve (factor)**(power) =~ .1 given power (5*loadav):
 *	solving for factor,
 *	ln(factor) =~ (-2.30/5*loadav), or
 *	factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
 *	    exp(-1/b) =~ (b-1)/b =~ b/(b+1).			QED
 *
 * Proof of (2):
 *    Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
 *	solving for power,
 *	power*ln(b/(b+1)) =~ -2.30, or
 *	power =~ 2.3 * (b + 1) = 4.6*loadav + 2.3 =~ 5*loadav.	QED
 *
 * Actual power values for the implemented algorithm are as follows:
 *	loadav:	1	2	3	4
 *	power:	5.68	10.32	14.94	19.55
 */
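
/*
 * The power table above can be reproduced with a short user-space
 * sketch (an illustrative addition, not part of the original file):
 * for each loadav it recomputes the decay factor the system uses and
 * solves decay**power == 0.1 for power.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double loadav, decay, power;

	for (loadav = 1; loadav <= 4; loadav++) {
		decay = (2 * loadav) / (2 * loadav + 1);
		/* solve decay**power == 0.1 for power */
		power = log(0.1) / log(decay);
		printf("loadav %.0f: decay %.4f power %.2f\n",
		    loadav, decay, power);
	}
	return (0);
}
#endif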

/* calculations for digital decay to forget 90% of usage in 5*loadav sec */
#define	loadfactor(loadav)	(2 * (loadav))
#define	decay_cpu(loadfac, cpu)	(((loadfac) * (cpu)) / ((loadfac) + FSCALE))

/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the percentage of CPU used by a
 * process.
 */
#define	CCPU_SHIFT	11

/*
 * Recompute process priorities every hz ticks.
 */
/* ARGSUSED */
void
schedcpu(arg)
	void *arg;
{
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);
	register struct proc *p;
	register int s;
	register unsigned int newcpu;

	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		/*
		 * Increment time in/out of memory and sleep time
		 * (if sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		p->p_swtime++;
		if (p->p_stat == SSLEEP || p->p_stat == SSTOP)
			p->p_slptime++;
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		/*
		 * If the process has slept the entire second,
		 * stop recalculating its priority until it wakes up.
		 */
		if (p->p_slptime > 1)
			continue;
		s = splstatclock();	/* prevent state changes */
		/*
		 * p_pctcpu is only for ps.
		 */
#if	(FSHIFT >= CCPU_SHIFT)
		p->p_pctcpu += (hz == 100)?
		    ((fixpt_t) p->p_cpticks) << (FSHIFT - CCPU_SHIFT):
		    100 * (((fixpt_t) p->p_cpticks)
			<< (FSHIFT - CCPU_SHIFT)) / hz;
#else
		p->p_pctcpu += ((FSCALE - ccpu) *
		    (p->p_cpticks * FSCALE / hz)) >> FSHIFT;
#endif
		p->p_cpticks = 0;
		newcpu = (u_int) decay_cpu(loadfac, p->p_estcpu) + p->p_nice;
		p->p_estcpu = min(newcpu, UCHAR_MAX);
		resetpriority(p);
		if (p->p_priority >= PUSER) {
#define	PPQ	(128 / NQS)		/* priorities per queue */
			if ((p != curproc) &&
			    p->p_stat == SRUN &&
			    (p->p_flag & P_INMEM) &&
			    (p->p_priority / PPQ) != (p->p_usrpri / PPQ)) {
				remrunqueue(p);
				p->p_priority = p->p_usrpri;
				setrunqueue(p);
			} else
				p->p_priority = p->p_usrpri;
		}
		splx(s);
	}
#if defined(UVM)
	uvm_meter();
#else
	vmmeter();
#endif
	wakeup((caddr_t)&lbolt);
	timeout(schedcpu, (void *)0, hz);
}
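
/*
 * A minimal user-space check (illustrative, not part of the original
 * file) of the claim that `ccpu' decays 95% of p_pctcpu in 60 seconds:
 * applying exp(-1/20) sixty times leaves exp(-3) =~ 0.0498 of the
 * original value.
 */
#if 0
#include <math.h>
#include <stdio.h>

int
main(void)
{
	double ccpu = exp(-1.0 / 20.0);	/* per-second decay factor */
	double pct = 1.0;		/* start from 100% CPU */
	int i;

	for (i = 0; i < 60; i++)
		pct *= ccpu;
	printf("left after 60s: %.4f\n", pct);	/* ~0.0498 */
	return (0);
}
#endif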

/*
 * Recalculate the priority of a process after it has slept for a while.
 * For all load averages >= 1 and max p_estcpu of 255, sleeping for at
 * least six times the loadfactor will decay p_estcpu to zero.
 */
void
updatepri(p)
	register struct proc *p;
{
	register unsigned int newcpu = p->p_estcpu;
	register fixpt_t loadfac = loadfactor(averunnable.ldavg[0]);

	if (p->p_slptime > 5 * loadfac)
		p->p_estcpu = 0;
	else {
		p->p_slptime--;	/* the first time was done in schedcpu */
		while (newcpu && --p->p_slptime)
			newcpu = (int) decay_cpu(loadfac, newcpu);
		p->p_estcpu = min(newcpu, UCHAR_MAX);
	}
	resetpriority(p);
}

/*
 * We're only looking at 7 bits of the address; everything is
 * aligned to 4, lots of things are aligned to greater powers
 * of 2.  Shift right by 8, i.e. drop the bottom 256 worth.
 */
#define TABLESIZE	128
#define LOOKUP(x)	(((long)(x) >> 8) & (TABLESIZE - 1))
struct slpque {
	struct proc *sq_head;
	struct proc **sq_tailp;
} slpque[TABLESIZE];

/*
 * During autoconfiguration or after a panic, a sleep will simply
 * lower the priority briefly to allow interrupts, then return.
 * The priority to be used (safepri) is machine-dependent, thus this
 * value is initialized and maintained in the machine-dependent layers.
 * This priority will typically be 0, or the lowest priority
 * that is safe for use on the interrupt stack; it can be made
 * higher to block network software interrupts after panics.
 */
int safepri;
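
/*
 * An illustrative user-space sketch (not part of the original file) of
 * how LOOKUP() spreads wait channels over the table: addresses that
 * differ by at least 256 bytes land in different buckets until the
 * 7 hash bits wrap.  The EX_ names mirror the real macros above.
 */
#if 0
#include <stdio.h>

#define EX_TABLESIZE	128
#define EX_LOOKUP(x)	(((long)(x) >> 8) & (EX_TABLESIZE - 1))

int
main(void)
{
	static char chan[4][512];	/* stand-ins for wait channels */
	int i;

	for (i = 0; i < 4; i++)
		printf("%p -> bucket %ld\n",
		    (void *)chan[i], EX_LOOKUP(chan[i]));
	return (0);
}
#endif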

/*
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If priority includes the PCATCH flag, signals
 * are checked before and after sleeping; otherwise signals are not
 * checked.  Returns 0 if awakened, EWOULDBLOCK if the timeout expires.
 * If PCATCH is set and a signal needs to be delivered, ERESTART is
 * returned if the current system call should be restarted if possible,
 * and EINTR is returned if the system call should be interrupted by
 * the signal.
 */
int
tsleep(ident, priority, wmesg, timo)
	void *ident;
	int priority, timo;
	char *wmesg;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	int sig, catch = priority & PCATCH;
	extern int cold;

#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return (0);
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("tsleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = wmesg;
	p->p_slptime = 0;
	p->p_priority = priority & PRIMASK;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	if (timo)
		timeout(endtsleep, (void *)p, timo);
	/*
	 * We put ourselves on the sleep queue and start our timeout
	 * before calling CURSIG, as we could stop there, and a wakeup
	 * or a SIGCONT (or both) could occur while we were stopped.
	 * A SIGCONT would cause us to be marked as SSLEEP
	 * without resuming us, thus we must be ready for sleep
	 * when CURSIG is called.  If the wakeup happens while we're
	 * stopped, p->p_wchan will be 0 upon return from CURSIG.
	 */
	if (catch) {
		p->p_flag |= P_SINTR;
		if ((sig = CURSIG(p)) != 0) {
			if (p->p_wchan)
				unsleep(p);
			p->p_stat = SRUN;
			goto resume;
		}
		if (p->p_wchan == 0) {
			catch = 0;
			goto resume;
		}
	} else
		sig = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
	mi_switch();
#ifdef DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendtsleep ; bpendtsleep:");
#endif
resume:
	curpriority = p->p_usrpri;
	splx(s);
	p->p_flag &= ~P_SINTR;
	if (p->p_flag & P_TIMEOUT) {
		p->p_flag &= ~P_TIMEOUT;
		if (sig == 0) {
#ifdef KTRACE
			if (KTRPOINT(p, KTR_CSW))
				ktrcsw(p->p_tracep, 0, 0);
#endif
			return (EWOULDBLOCK);
		}
	} else if (timo)
		untimeout(endtsleep, (void *)p);
	if (catch && (sig != 0 || (sig = CURSIG(p)) != 0)) {
#ifdef KTRACE
		if (KTRPOINT(p, KTR_CSW))
			ktrcsw(p->p_tracep, 0, 0);
#endif
		if (p->p_sigacts->ps_sigintr & sigmask(sig))
			return (EINTR);
		return (ERESTART);
	}
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	return (0);
}

/*
 * Implement timeout for tsleep.
 * If process hasn't been awakened (wchan non-zero),
 * set timeout flag and undo the sleep.  If proc
 * is stopped, just unsleep so it will remain stopped.
 */
void
endtsleep(arg)
	void *arg;
{
	register struct proc *p;
	int s;

	p = (struct proc *)arg;
	s = splhigh();
	if (p->p_wchan) {
		if (p->p_stat == SSLEEP)
			setrunnable(p);
		else
			unsleep(p);
		p->p_flag |= P_TIMEOUT;
	}
	splx(s);
}
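
/*
 * A hypothetical consumer of tsleep(), sketched for illustration only:
 * "example_softc" and its sc_busy member are invented names, not part
 * of this file.  It shows the usual pattern of re-checking the waited-on
 * condition in a loop and mapping the three return values documented
 * above.
 */
#if 0
int
example_wait(struct example_softc *sc)
{
	int error;

	while (sc->sc_busy) {
		/* interruptible sleep, at most 5 seconds per attempt */
		error = tsleep(&sc->sc_busy, PZERO | PCATCH, "exwait",
		    5 * hz);
		if (error == EWOULDBLOCK)
			return (ETIMEDOUT);	/* timo expired */
		if (error)
			return (error);		/* EINTR or ERESTART */
	}
	sc->sc_busy = 1;			/* claim the resource */
	return (0);
}
#endif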

/*
 * Short-term, non-interruptible sleep.
 */
void
sleep(ident, priority)
	void *ident;
	int priority;
{
	register struct proc *p = curproc;
	register struct slpque *qp;
	register int s;
	extern int cold;

#ifdef DIAGNOSTIC
	if (priority > PZERO) {
		printf("sleep called with priority %d > PZERO, wchan: %p\n",
		    priority, ident);
		panic("old sleep");
	}
#endif
	s = splhigh();
	if (cold || panicstr) {
		/*
		 * After a panic, or during autoconfiguration,
		 * just give interrupts a chance, then just return;
		 * don't run any other procs or panic below,
		 * in case this is the idle process and already asleep.
		 */
		splx(safepri);
		splx(s);
		return;
	}
#ifdef DIAGNOSTIC
	if (ident == NULL || p->p_stat != SRUN || p->p_back)
		panic("sleep");
#endif
	p->p_wchan = ident;
	p->p_wmesg = NULL;
	p->p_slptime = 0;
	p->p_priority = priority;
	qp = &slpque[LOOKUP(ident)];
	if (qp->sq_head == 0)
		qp->sq_head = p;
	else
		*qp->sq_tailp = p;
	*(qp->sq_tailp = &p->p_forw) = 0;
	p->p_stat = SSLEEP;
	p->p_stats->p_ru.ru_nvcsw++;
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 1, 0);
#endif
	mi_switch();
#ifdef DDB
	/* handy breakpoint location after process "wakes" */
	__asm(".globl bpendsleep ; bpendsleep:");
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_CSW))
		ktrcsw(p->p_tracep, 0, 0);
#endif
	curpriority = p->p_usrpri;
	splx(s);
}

/*
 * Remove a process from its wait queue.
 */
void
unsleep(p)
	register struct proc *p;
{
	register struct slpque *qp;
	register struct proc **hp;
	int s;

	s = splhigh();
	if (p->p_wchan) {
		hp = &(qp = &slpque[LOOKUP(p->p_wchan)])->sq_head;
		while (*hp != p)
			hp = &(*hp)->p_forw;
		*hp = p->p_forw;
		if (qp->sq_tailp == &p->p_forw)
			qp->sq_tailp = hp;
		p->p_wchan = 0;
	}
	splx(s);
}

/*
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(ident)
	register void *ident;
{
	register struct slpque *qp;
	register struct proc *p, **q;
	int s;

	s = splhigh();
	qp = &slpque[LOOKUP(ident)];
restart:
	for (q = &qp->sq_head; (p = *q) != NULL; ) {
#ifdef DIAGNOSTIC
		if (p->p_back || (p->p_stat != SSLEEP && p->p_stat != SSTOP))
			panic("wakeup");
#endif
		if (p->p_wchan == ident) {
			p->p_wchan = 0;
			*q = p->p_forw;
			if (qp->sq_tailp == &p->p_forw)
				qp->sq_tailp = q;
			if (p->p_stat == SSLEEP) {
				/* OPTIMIZED EXPANSION OF setrunnable(p); */
				if (p->p_slptime > 1)
					updatepri(p);
				p->p_slptime = 0;
				p->p_stat = SRUN;
				if (p->p_flag & P_INMEM)
					setrunqueue(p);
				/*
				 * Since curpriority is a user priority,
				 * p->p_priority is always better than
				 * curpriority.
				 */
				if ((p->p_flag & P_INMEM) == 0)
					wakeup((caddr_t)&proc0);
				else
					need_resched();
				/* END INLINE EXPANSION */
				goto restart;
			}
		} else
			q = &p->p_forw;
	}
	splx(s);
}
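
/*
 * The producer half of the hypothetical sketch following endtsleep()
 * above, again for illustration only ("example_softc" is an invented
 * name): clear the condition first, then wake every process sleeping
 * on the same identifier.
 */
#if 0
void
example_done(struct example_softc *sc)
{
	sc->sc_busy = 0;
	wakeup(&sc->sc_busy);	/* runs the queue for this wait channel */
}
#endif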

/*
 * The machine independent parts of mi_switch().
 * Must be called at splstatclock() or higher.
 */
void
mi_switch()
{
	register struct proc *p = curproc;	/* XXX */
	register struct rlimit *rlim;
	register long s, u;
	struct timeval tv;

	/*
	 * Compute the amount of time during which the current
	 * process was running, and add that to its total so far.
	 */
	microtime(&tv);
	u = p->p_rtime.tv_usec + (tv.tv_usec - runtime.tv_usec);
	s = p->p_rtime.tv_sec + (tv.tv_sec - runtime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	p->p_rtime.tv_usec = u;
	p->p_rtime.tv_sec = s;

	/*
	 * Check if the process exceeds its cpu resource allocation.
	 * If over max, kill it.  In any case, if it has run for more
	 * than 10 minutes, reduce priority to give others a chance.
	 */
	rlim = &p->p_rlimit[RLIMIT_CPU];
	if (s >= rlim->rlim_cur) {
		if (s >= rlim->rlim_max)
			psignal(p, SIGKILL);
		else {
			psignal(p, SIGXCPU);
			if (rlim->rlim_cur < rlim->rlim_max)
				rlim->rlim_cur += 5;
		}
	}
	if (s > 10 * 60 && p->p_ucred->cr_uid && p->p_nice == NZERO) {
		p->p_nice = NZERO + 4;
		resetpriority(p);
	}

	/*
	 * Pick a new current process and record its start time.
	 */
#if defined(UVM)
	uvmexp.swtch++;
#else
	cnt.v_swtch++;
#endif
	cpu_switch(p);
	microtime(&runtime);
}
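
/*
 * An illustrative user-space sketch (not part of the original file) of
 * the timeval normalization in mi_switch() above: a microsecond sum
 * outside [0, 1000000) borrows from or carries into the seconds.  The
 * values are made up: 1.100000s accumulated, 2.900000 -> 3.050000
 * elapsed, giving 1.250000s total.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	long u = 100000 + (50000 - 900000);	/* usec sum: -750000 */
	long s = 1 + (3 - 2);			/* sec sum: 2 */

	if (u < 0) {				/* borrow from seconds */
		u += 1000000;
		s--;
	} else if (u >= 1000000) {		/* carry into seconds */
		u -= 1000000;
		s++;
	}
	printf("%ld.%06lds\n", s, u);		/* prints 1.250000s */
	return (0);
}
#endif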

/*
 * Initialize the (doubly-linked) run queues
 * to be empty.
 */
void
rqinit()
{
	register int i;

	for (i = 0; i < NQS; i++)
		qs[i].ph_link = qs[i].ph_rlink = (struct proc *)&qs[i];
}

/*
 * Change process state to be runnable,
 * placing it on the run queue if it is in memory,
 * and awakening the swapper if it isn't in memory.
 */
void
setrunnable(p)
	register struct proc *p;
{
	register int s;

	s = splhigh();
	switch (p->p_stat) {
	case 0:
	case SRUN:
	case SZOMB:
	default:
		panic("setrunnable");
	case SSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_flag & P_TRACED) != 0 && p->p_xstat != 0)
			p->p_siglist |= sigmask(p->p_xstat);
		/* FALLTHROUGH */
	case SSLEEP:
		unsleep(p);		/* e.g. when sending signals */
		break;

	case SIDL:
		break;
	}
	p->p_stat = SRUN;
	if (p->p_flag & P_INMEM)
		setrunqueue(p);
	splx(s);
	if (p->p_slptime > 1)
		updatepri(p);
	p->p_slptime = 0;
	if ((p->p_flag & P_INMEM) == 0)
		wakeup((caddr_t)&proc0);
	else if (p->p_priority < curpriority)
		need_resched();
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.  (A worked example of the
 * arithmetic appears at the end of this file.)
 */
void
resetpriority(p)
	register struct proc *p;
{
	register unsigned int newpriority;

	newpriority = PUSER + p->p_estcpu / 4 + 2 * p->p_nice;
	newpriority = min(newpriority, MAXPRI);
	p->p_usrpri = newpriority;
	if (newpriority < curpriority)
		need_resched();
}

#ifdef DDB
#include <machine/db_machdep.h>

#include <ddb/db_interface.h>
#include <ddb/db_output.h>

void
db_show_all_procs(addr, haddr, count, modif)
	db_expr_t addr;
	int haddr;
	db_expr_t count;
	char *modif;
{
	char *mode;
	int doingzomb = 0;
	struct proc *p, *pp;

	if (modif[0] == 0)
		modif[0] = 'n';		/* default == normal mode */

	mode = "mawn";
	while (*mode && *mode != modif[0])
		mode++;
	if (*mode == 0 || *mode == 'm') {
		db_printf("usage: show all procs [/a] [/n] [/w]\n");
		db_printf("\t/a == show process address info\n");
		db_printf("\t/n == show normal process info [default]\n");
		db_printf("\t/w == show process wait/emul info\n");
		return;
	}

	p = allproc.lh_first;

	switch (*mode) {

	case 'a':
		db_printf(" PID %-10s %18s %18s %18s\n",
		    "COMMAND", "STRUCT PROC *", "UAREA *", "VMSPACE/VM_MAP");
		break;
	case 'n':
		db_printf(" PID %5s %5s %5s S %10s %-9s %-16s\n",
		    "PPID", "PGRP", "UID", "FLAGS", "WAIT", "COMMAND");
		break;
	case 'w':
		db_printf(" PID %-16s %-8s %18s %s\n",
		    "COMMAND", "EMUL", "WAIT-CHANNEL", "WAIT-MSG");
		break;
	}

	while (p != 0) {
		pp = p->p_pptr;
		if (p->p_stat) {

			db_printf("%5d ", p->p_pid);

			switch (*mode) {

			case 'a':
				db_printf("%-10.10s %18p %18p %18p\n",
				    p->p_comm, p, p->p_addr, p->p_vmspace);
				break;

			case 'n':
				db_printf("%5d %5d %5d %d %#10x "
				    "%-9.9s %-16s\n",
				    pp ? pp->p_pid : -1, p->p_pgrp->pg_id,
				    p->p_cred->p_ruid, p->p_stat, p->p_flag,
				    (p->p_wchan && p->p_wmesg) ?
					p->p_wmesg : "", p->p_comm);
				break;

			case 'w':
				db_printf("%-16s %-8s %18p %s\n", p->p_comm,
				    p->p_emul->e_name, p->p_wchan,
				    (p->p_wchan && p->p_wmesg) ?
					p->p_wmesg : "");
				break;

			}
		}
		p = p->p_list.le_next;
		if (p == 0 && doingzomb == 0) {
			doingzomb = 1;
			p = zombproc.lh_first;
		}
	}
}
#endif
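
/*
 * A worked user-space check (illustrative, not part of the original
 * file) of the resetpriority() arithmetic defined earlier in this
 * file; the EX_PUSER and EX_MAXPRI values are assumptions mirroring
 * the usual sys/param.h definitions.
 */
#if 0
#include <stdio.h>

#define EX_PUSER	50	/* assumed PUSER */
#define EX_MAXPRI	127	/* assumed MAXPRI */

int
main(void)
{
	unsigned int p_estcpu = 36;	/* hypothetical decayed CPU usage */
	unsigned int p_nice = 20;	/* hypothetical default (NZERO) */
	unsigned int newpriority;

	newpriority = EX_PUSER + p_estcpu / 4 + 2 * p_nice;
	if (newpriority > EX_MAXPRI)
		newpriority = EX_MAXPRI;
	printf("user priority: %u\n", newpriority);	/* 50+9+40 == 99 */
	return (0);
}
#endif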