/*	$NetBSD: kern_synch.c,v 1.207 2007/11/12 23:11:59 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.207 2007/11/12 23:11:59 ad Exp $");

#include "opt_kstack.h"
#include "opt_lockdebug.h"
#include "opt_multiprocessor.h"
#include "opt_perfctrs.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>

#include <uvm/uvm_extern.h>

callout_t	sched_pstats_ch;
unsigned int	sched_pstats_ticks;

kcondvar_t	lbolt;			/* once a second sleep address */

static void	sched_unsleep(struct lwp *);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current process until a wakeup is
 * performed on the specified identifier.  The process will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If pri includes the PCATCH flag, signals are checked
 * before and after sleeping, else signals are not checked.  Returns 0 if
 * awakened, EWOULDBLOCK if the timeout expires.  If PCATCH is set and a
 * signal needs to be delivered, ERESTART is returned if the current system
 * call should be restarted if possible, and EINTR is returned if the system
 * call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
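
/*
 * Illustrative sketch (not part of the original sources): a typical
 * mtsleep() caller holds a kmutex protecting the condition, re-checks the
 * condition in a loop, and breaks out on timeout (EWOULDBLOCK) or, with
 * PCATCH, on a signal (EINTR/ERESTART).  The "sc" softc, its members and
 * the wait message are hypothetical; new code would normally use condvars
 * (cv_wait/cv_timedwait) rather than this obsolete interface.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (!sc->sc_done) {
 *		error = mtsleep(&sc->sc_done, PZERO | PCATCH, "scwait",
 *		    hz, &sc->sc_lock);
 *		if (error != 0)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */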

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l);
	sleepq_enter(sq, l);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}
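
/*
 * Illustrative sketch (not part of the original sources): kpause() is the
 * simple way to stall for a short period when nothing will issue a wakeup,
 * for example while polling hardware for readiness.  The register offset
 * and status bit below are hypothetical; the sleep is roughly 10ms per
 * iteration, not interruptible by signals, and no mutex is passed.
 *
 *	while ((bus_space_read_4(sc->sc_iot, sc->sc_ioh, HYP_REG_STATUS) &
 *	    HYP_STATUS_READY) == 0)
 *		(void)kpause("hwpoll", false, mstohz(10), NULL);
 */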

/*
 * OBSOLETE INTERFACE
 *
 * Make all processes sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, (u_int)-1);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority process first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;

	if (cold)
		return;

	sq = sleeptab_lookup(&sleeptab, ident);
	sleepq_wake(sq, ident, 1);
}


/*
 * General yield call.  Puts the current process back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current process explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	if (l->l_class == SCHED_OTHER) {
		/*
		 * Only for timeshared threads.  It will be reset
		 * by the scheduler in due course.
		 */
		l->l_priority = 0;
	}
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current process back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, &l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct timeval *tv)
{
	long s, u;

	if ((l->l_flag & LW_IDLE) != 0)
		return;

	u = l->l_rtime.tv_usec + (tv->tv_usec - l->l_stime.tv_usec);
	s = l->l_rtime.tv_sec + (tv->tv_sec - l->l_stime.tv_sec);
	if (u < 0) {
		u += 1000000;
		s--;
	} else if (u >= 1000000) {
		u -= 1000000;
		s++;
	}
	l->l_rtime.tv_usec = u;
	l->l_rtime.tv_sec = s;
}
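
/*
 * Worked example (illustrative, not part of the original sources): the
 * normalization above keeps tv_usec in [0, 1000000).  With an accumulated
 * l_rtime of 2s 900000us, an l_stime of 10s 800000us and a current time
 * of 11s 100000us, the raw sums are u = 900000 + (100000 - 800000) =
 * 200000 and s = 2 + (11 - 10) = 3, so no carry or borrow is needed and
 * l_rtime becomes 3s 200000us.  Had u gone negative, 1000000 would be
 * added to u and one second borrowed from s, and vice versa for
 * u >= 1000000.
 */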

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct cpu_info *ci;
	struct timeval tv;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	microtime(&tv);

	/*
	 * It's safe to read the per CPU schedstate unlocked here, as all we
	 * are after is the run time and that's guaranteed to have been last
	 * updated by this CPU.
	 */
	ci = l->l_cpu;
	KDASSERT(ci == curcpu());

	/*
	 * Process is about to yield the CPU; clear the appropriate
	 * scheduling flags.
	 */
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_flag & LW_TIMEINTR) != 0)
				updatertime(l, &tv);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_flag |= LW_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &tv);
	}

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	mutex_spin_enter(spc->spc_mutex);
	KASSERT(l->l_stat != LSRUN);
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, &spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
		} else
			l->l_stat = LSIDL;
	}

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, switch to the idle LWP.
	 * Note that spc_lwplock might not necessarily be held.
	 */
	if (newl == NULL) {
		newl = sched_nextlwp();
		if (newl != NULL) {
			sched_dequeue(newl);
			KASSERT(lwp_locked(newl, spc->spc_mutex));
			newl->l_stat = LSONPROC;
			newl->l_cpu = ci;
			newl->l_flag |= LW_RUNNING;
			lwp_setlock(newl, &spc->spc_lwplock);
		} else {
			newl = ci->ci_data.cpu_idlelwp;
			newl->l_stat = LSONPROC;
			newl->l_flag |= LW_RUNNING;
		}
		/*
		 * Only clear want_resched if there are no
		 * pending (slow) software interrupts.
		 */
		ci->ci_want_resched = ci->ci_data.cpu_softints;
		spc->spc_flags &= ~SPCF_SWITCHCLEAR;
		spc->spc_curpriority = lwp_eprio(newl);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = tv;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/*
		 * If the old LWP has been moved to a run queue above,
		 * drop the general purpose LWP lock: it's now locked
		 * by the scheduler lock.
		 *
		 * Otherwise, drop the scheduler lock.  We're done with
		 * the run queues for now.
		 */
		if (l->l_mutex == spc->spc_mutex) {
			mutex_spin_exit(&spc->spc_lwplock);
		} else {
			mutex_spin_exit(spc->spc_mutex);
		}

		/* Unlocked, but for statistics only. */
		uvmexp.swtch++;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/* Switch to the new LWP.. */
		l->l_ncsw++;
		l->l_flag &= ~LW_RUNNING;
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * .. we have switched away and are now back so we must
		 * be the new curlwp.  prevlwp is who we replaced.
		 */
		if (prevlwp != NULL) {
			ci->ci_mtx_oldspl = oldspl;
			lwp_unlock(prevlwp);
		} else {
			splx(oldspl);
		}

		/* Restore VM context. */
		pmap_activate(l);
		retval = 1;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = (short)ci->ci_data.cpu_index;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);
	KASSERT(l->l_cpu == ci);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif

	/*
	 * We're running again; record our new start time.  We might
	 * be running on a new CPU now, so don't use the cached
	 * schedstate_percpu pointer.
	 */
	SYSCALL_TIME_WAKEUP(l);
	KASSERT(curlwp == l);
	KDASSERT(l->l_cpu == ci);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}
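
/*
 * Illustrative sketch (not part of the original sources): the caller-side
 * contract for mi_switch(), as exercised by yield() and preempt() above.
 * Any big-lock holds are released first, the current LWP is locked, its
 * state and priority are adjusted as needed, and mi_switch() then releases
 * the LWP lock as part of switching away.
 *
 *	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
 *	lwp_lock(l);
 *	... adjust l->l_stat, l->l_priority as required ...
 *	(void)mi_switch(l);
 *	KERNEL_LOCK(l->l_biglocks, l);
 */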

/*
 * Change process state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;
	sigset_t *ss;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(&p->p_smutex));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0) {
			if ((sigprop[p->p_xstat] & SA_TOLWP) != 0)
				ss = &l->l_sigpend.sp_set;
			else
				ss = &p->p_sigpend.sp_set;
			sigaddset(ss, p->p_xstat);
			signotify(l);
		}
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_flag & LW_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	if (l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex) {
		lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
		lwp_lock(l);
	}
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If thread is swapped out - wake the swapper to bring it back in.
	 * Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}
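
/*
 * Illustrative sketch (not part of the original sources): the locking
 * contract for setrunnable(), as exercised by suspendsched() below.  Both
 * p_smutex and the LWP lock are held on entry; setrunnable() consumes the
 * LWP lock, so only p_smutex remains for the caller to release.
 *
 *	mutex_enter(&p->p_smutex);
 *	lwp_lock(l);
 *	if (l->l_stat == LSSTOP || l->l_stat == LSSUSPENDED ||
 *	    l->l_stat == LSSLEEP)
 *		setrunnable(l);
 *	else
 *		lwp_unlock(l);
 *	mutex_exit(&p->p_smutex);
 */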

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(&proclist_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(&p->p_smutex);

		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(&p->p_smutex);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(&p->p_smutex);
	}
	mutex_exit(&proclist_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_kpri:
 *
 *	Scale a priority level to a kernel priority level, usually
 *	for an LWP that is about to sleep.
 */
pri_t
sched_kpri(struct lwp *l)
{
	pri_t pri;

#ifndef __HAVE_FAST_SOFTINTS
	/*
	 * Hack: if a user thread is being used to run a soft
	 * interrupt, we need to boost the priority here.
	 */
	if ((l->l_pflag & LP_INTR) != 0 && l->l_priority < PRI_KERNEL_RT)
		return softint_kpri(l);
#endif

	/*
	 * Scale user priorities (0 -> 63) up to kernel priorities
	 * in the range (64 -> 95).  This makes assumptions about
	 * the priority space and so should be kept in sync with
	 * param.h.
	 */
	if ((pri = l->l_priority) >= PRI_KERNEL)
		return pri;
	return (pri >> 1) + PRI_KERNEL;
}
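
/*
 * Worked example (illustrative, not part of the original sources),
 * assuming PRI_KERNEL == 64 as implied by the comment above: a user
 * priority of 0 maps to (0 >> 1) + 64 == 64, 32 maps to 80, and the
 * maximum user priority of 63 maps to (63 >> 1) + 64 == 95, so the full
 * user range (0 -> 63) folds into the kernel range (64 -> 95) while
 * preserving relative ordering.  Priorities already at or above
 * PRI_KERNEL are returned unchanged.
 */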

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci;

	/*
	 * XXXSMP
	 * Since l->l_cpu persists across a context switch,
	 * this gives us *very weak* processor affinity, in
	 * that we notify the CPU on which the process last
	 * ran that it should try to switch.
	 *
	 * This does not guarantee that the process will run on
	 * that processor next, because another processor might
	 * grab it the next time it performs a context switch.
	 *
	 * This also does not handle the case where its last
	 * CPU is running a higher-priority process, but every
	 * other CPU is running a lower-priority process.  There
	 * are ways to handle this situation, but they're not
	 * currently very pretty, and we also need to weigh the
	 * cost of moving a process from one CPU to another.
	 */
	ci = l->l_cpu;
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}


/* decay 95% of `p_pctcpu' in 60 seconds; see CCPU_SHIFT before changing */
fixpt_t	ccpu = 0.95122942450071400909 * FSCALE;		/* exp(-1/20) */

/*
 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
 * and possibly adjust FSHIFT in "param.h" so that (FSHIFT >= CCPU_SHIFT).
 *
 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
 *	1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
 *
 * If you don't want to bother with the faster/more-accurate formula, you
 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
 * (more general) method of calculating the %age of CPU used by a process.
 */
#define	CCPU_SHIFT	(FSHIFT + 1)
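
/*
 * Worked example (illustrative, not part of the original sources),
 * assuming FSHIFT == 11 (FSCALE == 2048) as the comment above implies:
 * ccpu == 0.95122942... * 2048 ~= 1948, so each once-per-second
 * sched_pstats() pass multiplies the %CPU estimate by roughly
 * 1948/2048 == exp(-1/20).  After 60 passes the accumulated factor is
 * exp(-60/20) == exp(-3) ~= 0.05, which is where the "decay 95% ... in
 * 60 seconds" figure above comes from.
 */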

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	int sig, clkhz;
	long runtm;

	sched_pstats_ticks++;

	mutex_enter(&proclist_mutex);
	PROCLIST_FOREACH(p, &allproc) {
		/*
		 * Increment time in/out of memory and sleep time (if
		 * sleeping).  We ignore overflow; with 16-bit int's
		 * (remember them?) overflow takes 45 days.
		 */
		mutex_enter(&p->p_smutex);
		mutex_spin_enter(&p->p_stmutex);
		runtm = p->p_rtime.tv_sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if ((l->l_flag & LW_IDLE) != 0)
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.tv_sec;
			l->l_swtime++;
			sched_pstats_hook(l);
			lwp_unlock(l);

			/*
			 * p_pctcpu is only for ps.
			 */
			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime < 1) {
				clkhz = stathz != 0 ? stathz : hz;
#if	(FSHIFT >= CCPU_SHIFT)
				l->l_pctcpu += (clkhz == 100) ?
				    ((fixpt_t)l->l_cpticks) <<
				    (FSHIFT - CCPU_SHIFT) :
				    100 * (((fixpt_t)l->l_cpticks)
				    << (FSHIFT - CCPU_SHIFT)) / clkhz;
#else
				l->l_pctcpu += ((FSCALE - ccpu) *
				    (l->l_cpticks * FSCALE / clkhz)) >> FSHIFT;
#endif
				l->l_cpticks = 0;
			}
		}
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;
		mutex_spin_exit(&p->p_stmutex);

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (runtm >= rlim->rlim_cur) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(&p->p_smutex);
		if (sig) {
			psignal(p, sig);
		}
	}
	mutex_exit(&proclist_mutex);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}

void
sched_init(void)
{

	callout_init(&sched_pstats_ch, 0);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);
	sched_setup();
	sched_pstats(NULL);
}