/*	$NetBSD: kern_synch.c,v 1.285 2010/12/18 01:13:36 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.285 2010/12/18 01:13:36 rmind Exp $");

#include "opt_kstack.h"
#include "opt_perfctrs.h"
#include "opt_sa.h"
#include "opt_dtrace.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/simplelock.h>

#include <uvm/uvm_extern.h>

#include <dev/lockstat.h>

#include <sys/dtrace_bsd.h>
int dtrace_vtime_active = 0;
dtrace_vtime_switch_func_t dtrace_vtime_switch_func;

static void	sched_unsleep(struct lwp *, bool);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
static void	resched_cpu(struct lwp *);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

unsigned	sched_pstats_ticks;
kcondvar_t	lbolt;			/* once a second sleep address */

/* Preemption event counters */
static struct evcnt kpreempt_ev_crit;
static struct evcnt kpreempt_ev_klock;
static struct evcnt kpreempt_ev_immed;

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

void
synch_init(void)
{

	cv_init(&lbolt, "lbolt");

	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: critical section");
	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "defer: kernel_lock");
	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
	    "kpreempt", "immediate");
}
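
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a subsystem that only needs to wake up roughly once per second can wait
 * on the lbolt condvar declared above, which sched_pstats() broadcasts
 * once a second.  The mutex and condition names here are hypothetical;
 * any mutex already held by the caller serves as the condvar interlock.
 *
 *	mutex_enter(&hypothetical_lock);
 *	while (throttled)
 *		cv_wait(&lbolt, &hypothetical_lock);
 *	mutex_exit(&hypothetical_lock);
 */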

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current LWP until a wakeup is
 * performed on the specified identifier.  The LWP will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds
 * (0 means no timeout).  If pri includes the PCATCH flag, signals are
 * checked before and after sleeping, else signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);
	KASSERT(ident != &lbolt);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
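
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the classic wait loop built on mtsleep() above.  The condition is
 * re-checked after every wakeup, PCATCH lets signals interrupt the wait
 * (EINTR/ERESTART), and a timeout of hz means EWOULDBLOCK is returned
 * about once per second so the loop can re-evaluate.  The caller's mutex
 * keeps the test and the sleep atomic with respect to the waker.  "sc"
 * and its members are hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	while (sc->sc_busy) {
 *		error = mtsleep(&sc->sc_busy, PRIBIO | PCATCH, "xxbusy",
 *		    hz, &sc->sc_lock);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 *
 * New code would normally use a condition variable (cv_wait/cv_timedwait)
 * instead; kpause() below covers simple delays where no wakeup is expected.
 */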

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	KASSERT(!(timo == 0 && intr == false));

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}

#ifdef KERN_SA
/*
 * sa_awaken:
 *
 *	We believe this lwp is an SA lwp.  If it's yielding,
 *	let it know it needs to wake up.
 *
 *	We are called and exit with the lwp locked.  We are
 *	called in the middle of wakeup operations, so we need
 *	to not touch the locks at all.
 */
void
sa_awaken(struct lwp *l)
{
	/* LOCK_ASSERT(lwp_locked(l, NULL)); */

	if (l == l->l_savp->savp_lwp && l->l_flag & LW_SA_YIELD)
		l->l_flag &= ~LW_SA_IDLE;
}
#endif /* KERN_SA */

/*
 * OBSOLETE INTERFACE
 *
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, (u_int)-1, mp);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority LWP first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, 1, mp);
}
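
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the producer side that pairs with the mtsleep() loop shown earlier.  The
 * condition must be changed under the same lock the sleeper holds, and the
 * identifier passed to wakeup() must be the address the sleeper passed to
 * mtsleep(); wakeup_one() would wake only the highest priority waiter.
 * "sc" is hypothetical.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_busy = false;
 *	mutex_exit(&sc->sc_lock);
 *	wakeup(&sc->sc_busy);
 */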

/*
 * General yield call.  Puts the current LWP back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current LWP explicitly requests it (e.g. sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
 *
 * Character addresses for lockstat only.
 */
static char	in_critical_section;
static char	kernel_lock_held;
static char	is_softint;
static char	cpu_kpreempt_enter_fail;

bool
kpreempt(uintptr_t where)
{
	uintptr_t failed;
	lwp_t *l;
	int s, dop, lsflag;

	l = curlwp;
	failed = 0;
	while ((dop = l->l_dopreempt) != 0) {
		if (l->l_stat != LSONPROC) {
			/*
			 * About to block (or die), let it happen.
			 * Doesn't really count as "preemption has
			 * been blocked", since we're going to
			 * context switch.
			 */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
			/* Can't preempt idle loop, don't count as failure. */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false(l->l_nopreempt != 0)) {
			/* LWP holds preemption disabled, explicitly. */
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_crit.ev_count++;
			}
			failed = (uintptr_t)&in_critical_section;
			break;
		}
		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
			/* Can't preempt soft interrupts yet. */
			l->l_dopreempt = 0;
			failed = (uintptr_t)&is_softint;
			break;
		}
		s = splsched();
		if (__predict_false(l->l_blcnt != 0 ||
		    curcpu()->ci_biglock_wanted != NULL)) {
			/* Hold or want kernel_lock, code is not MT safe. */
			splx(s);
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_klock.ev_count++;
			}
			failed = (uintptr_t)&kernel_lock_held;
			break;
		}
		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
			/*
			 * It may be that the IPL is too high.
			 * kpreempt_enter() can schedule an
			 * interrupt to retry later.
			 */
			splx(s);
			failed = (uintptr_t)&cpu_kpreempt_enter_fail;
			break;
		}
		/* Do it! */
		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
			kpreempt_ev_immed.ev_count++;
		}
		lwp_lock(l);
		mi_switch(l);
		l->l_nopreempt++;
		splx(s);

		/* Take care of any MD cleanup. */
		cpu_kpreempt_exit(where);
		l->l_nopreempt--;
	}

	if (__predict_true(!failed)) {
		return false;
	}

	/* Record preemption failure for reporting via lockstat. */
	atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
	lsflag = 0;
	LOCKSTAT_ENTER(lsflag);
	if (__predict_false(lsflag)) {
		if (where == 0) {
			where = (uintptr_t)__builtin_return_address(0);
		}
		/* Preemption is on, might recurse, so make it atomic. */
		if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL,
		    (void *)where) == NULL) {
			LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
			l->l_pfaillock = failed;
		}
	}
	LOCKSTAT_EXIT(lsflag);
	return true;
}

/*
 * Return true if preemption is explicitly disabled.
 */
bool
kpreempt_disabled(void)
{
	const lwp_t *l = curlwp;

	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
}

/*
 * Disable kernel preemption.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

/*
 * Reenable kernel preemption.
 */
void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}
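
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the usual reason to disable kernel preemption explicitly is to keep the
 * LWP bound to the current CPU while it manipulates per-CPU state, so that
 * curcpu() remains stable across the critical section.  The counter array
 * below is hypothetical.
 *
 *	kpreempt_disable();
 *	hypothetical_percpu_counter[cpu_index(curcpu())]++;
 *	kpreempt_enable();
 *
 * See kpreempt_disabled() above for the other conditions under which an
 * LWP is not preemptible.
 */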

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if (__predict_false(l->l_flag & LW_IDLE))
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}

/*
 * Select the next LWP from the current CPU to run.
 */
static inline lwp_t *
nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
{
	lwp_t *newl;

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP-lock.
	 */
	newl = sched_nextlwp();
	if (newl != NULL) {
		sched_dequeue(newl);
		KASSERT(lwp_locked(newl, spc->spc_mutex));
		KASSERT(newl->l_cpu == ci);
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
		lwp_setlock(newl, spc->spc_lwplock);
	} else {
		newl = ci->ci_data.cpu_idlelwp;
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}

	/*
	 * Only clear want_resched if there are no pending (slow)
	 * software interrupts.
	 */
	ci->ci_want_resched = ci->ci_data.cpu_softints;
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	spc->spc_curpriority = lwp_eprio(newl);

	return newl;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(kpreempt_disabled());
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

	kstack_check_magic(l);

	binuptime(&bt);

	KASSERT((l->l_pflag & LP_RUNNING) != 0);
	KASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_pflag & LP_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/* Lock the runqueue */
	KASSERT(l->l_stat != LSRUN);
	mutex_spin_enter(spc->spc_mutex);

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
			/*
			 * Handle migration.  Note that "migrating LWP" may
			 * be reset here, if interrupt/preemption happens
			 * early in idle LWP.
			 */
			if (l->l_target_cpu != NULL) {
				KASSERT((l->l_pflag & LP_INTR) == 0);
				spc->spc_migrating = l;
			}
		} else
			l->l_stat = LSIDL;
	}

	/* Pick new LWP to run. */
	if (newl == NULL) {
		newl = nextlwp(ci, spc);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);
	l->l_dopreempt = 0;
	if (__predict_false(l->l_pfailaddr != 0)) {
		LOCKSTAT_FLAG(lsflag);
		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
		    1, l->l_pfailtime, l->l_pfailaddr);
		LOCKSTAT_EXIT(lsflag);
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
		l->l_pfailaddr = 0;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		KASSERT((l->l_pflag & LP_RUNNING) != 0);
		l->l_pflag &= ~LP_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Count the context switch on this CPU. */
		ci->ci_data.cpu_nswtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (__predict_false(newl->l_ctxswtch != 0)) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/*
		 * If DTrace has set the active vtime enum to anything
		 * other than INACTIVE (0), then it should have set the
		 * function to call.
		 */
		if (__predict_false(dtrace_vtime_active)) {
			(*dtrace_vtime_switch_func)(newl);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		uvm_emap_switch(l);

		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
			l->l_lwpctl->lc_pctr++;
		}

		KASSERT(l->l_cpu == ci);
		splx(oldspl);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 * XXXSMP preemption problem.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * The machine independent parts of context switch to oblivion.
 * Does not return.  Call with the LWP unlocked.
 */
void
lwp_exit_switchaway(lwp_t *l)
{
	struct cpu_info *ci;
	struct lwp *newl;
	struct bintime bt;

	ci = l->l_cpu;

	KASSERT(kpreempt_disabled());
	KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
	KASSERT(ci == curcpu());
	LOCKDEBUG_BARRIER(NULL, 0);

	kstack_check_magic(l);

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);
	binuptime(&bt);
	updatertime(l, &bt);

	/* Must stay at IPL_SCHED even after releasing run queue lock. */
	(void)splsched();

	/*
	 * Let sched_nextlwp() select the LWP to run the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP-lock.
	 */
	spc_lock(ci);
#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	} else
#endif	/* !__HAVE_FAST_SOFTINTS */
	{
		newl = nextlwp(ci, &ci->ci_schedstate);
	}

	/* Update the new LWP's start time. */
	newl->l_stime = bt;
	l->l_pflag &= ~LP_RUNNING;

	/*
	 * ci_curlwp changes when a fast soft interrupt occurs.
	 * We use cpu_onproc to keep track of which kernel or
	 * user thread is running 'underneath' the software
	 * interrupt.  This is important for time accounting,
	 * itimers and forcing user threads to preempt (aston).
	 */
	ci->ci_data.cpu_onproc = newl;

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);

	/* Unlock the run queue. */
	spc_unlock(ci);

	/* Count the context switch on this CPU. */
	ci->ci_data.cpu_nswtch++;

	/* Update status for lwpctl, if present. */
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;

	/*
	 * We may need to spin-wait if 'newl' is still
	 * context switching on another CPU.
	 */
	if (__predict_false(newl->l_ctxswtch != 0)) {
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;
		while (newl->l_ctxswtch)
			SPINLOCK_BACKOFF(count);
	}

	/*
	 * If DTrace has set the active vtime enum to anything
	 * other than INACTIVE (0), then it should have set the
	 * function to call.
	 */
	if (__predict_false(dtrace_vtime_active)) {
		(*dtrace_vtime_switch_func)(newl);
	}

	/* Switch to the new LWP. */
	(void)cpu_switchto(NULL, newl, false);

	for (;;) continue;	/* XXX: convince gcc about "noreturn" */
	/* NOTREACHED */
}

/*
 * setrunnable: change LWP state to be runnable, placing it on the run queue.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0)
			signotify(l);
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

#ifdef KERN_SA
	if (l->l_proc->p_sa)
		sa_awaken(l);
#endif /* KERN_SA */

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	spc_lock(ci);
	lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	sched_enqueue(l, false);
	resched_cpu(l);
	lwp_unlock(l);
}
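
/*
 * Illustrative sketch (added commentary, not part of the original file):
 * the locking contract for setrunnable().  The caller holds the proc's
 * p_lock and the LWP lock; setrunnable() consumes the LWP lock on every
 * path.  The LWP must be in LSSTOP, LSSUSPENDED or LSSLEEP, otherwise the
 * function panics.
 *
 *	mutex_enter(p->p_lock);
 *	lwp_lock(l);
 *	setrunnable(l);		releases the LWP lock
 *	mutex_exit(p->p_lock);
 */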

/*
 * suspendsched:
 *
 *	Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		mutex_enter(p->p_lock);
		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(p->p_lock);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * get as many LWPs as possible to the user / kernel
			 * boundary, so that they will release any locks
			 * that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static void
sched_unsleep(struct lwp *l, bool cleanup)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

static void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	KASSERT(lwp_locked(l, NULL));
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;

/*
 * Constants for averages over 1, 5 and 15 minutes when sampling at
 * 5 second intervals.
 */
static const fixpt_t cexp[] = {
	0.9200444146293232 * FSCALE,	/* exp(-1/12) */
	0.9834714538216174 * FSCALE,	/* exp(-1/60) */
	0.9944598480048967 * FSCALE,	/* exp(-1/180) */
};
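
/*
 * Worked numbers for the constants above (added commentary, not part of
 * the original file).  sched_pstats() runs once per second, so after 60
 * seconds p_pctcpu has been scaled by ccpu^60 = exp(-60/20) = exp(-3),
 * roughly 0.05: about 95% of the old estimate has decayed, as the comment
 * on ccpu states.  The load average is sampled on every fifth call (every
 * 5 seconds), so one minute is 12 samples and the 1 minute average decays
 * by exp(-1/12) per sample; 5 minutes is 60 samples (exp(-1/60)) and 15
 * minutes is 180 samples (exp(-1/180)).
 */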

/*
 * sched_pstats:
 *
 * => Update process statistics and check CPU resource allocation.
 * => Call scheduler-specific hook to eventually adjust LWP priorities.
 * => Compute load average of a quantity on 1, 5 and 15 minute intervals.
 */
void
sched_pstats(void)
{
	extern struct loadavg averunnable;
	struct loadavg *avg = &averunnable;
	const int clkhz = (stathz != 0 ? stathz : hz);
	static bool backwards = false;
	static u_int lavg_count = 0;
	struct proc *p;
	int nrun;

	sched_pstats_ticks++;
	if (++lavg_count >= 5) {
		lavg_count = 0;
		nrun = 0;
	}
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		struct lwp *l;
		struct rlimit *rlim;
		long runtm;
		int sig;

		/* Increment sleep time (if sleeping), ignore overflow. */
		mutex_enter(p->p_lock);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			fixpt_t lpctcpu;
			u_int lcpticks;

			if (__predict_false((l->l_flag & LW_IDLE) != 0))
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_lwp_stats(l);

			/* For load average calculation. */
			if (__predict_false(lavg_count == 0) &&
			    (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) {
				switch (l->l_stat) {
				case LSSLEEP:
					if (l->l_slptime > 1) {
						break;
					}
				case LSRUN:
				case LSONPROC:
				case LSIDL:
					nrun++;
				}
			}
			lwp_unlock(l);

			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime != 0)
				continue;

			lpctcpu = l->l_pctcpu;
			lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
			lpctcpu += ((FSCALE - ccpu) *
			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
			l->l_pctcpu = lpctcpu;
		}
		/* Calculating p_pctcpu only for ps(1) */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over max, kill it.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (__predict_false(runtm >= rlim->rlim_cur)) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(p->p_lock);
		if (__predict_false(runtm < 0)) {
			if (!backwards) {
				backwards = true;
				printf("WARNING: negative runtime; "
				    "monotonic clock has gone backwards\n");
			}
		} else if (__predict_false(sig)) {
			KASSERT((p->p_flag & PK_SYSTEM) == 0);
			psignal(p, sig);
		}
	}
	mutex_exit(proc_lock);

	/* Load average calculation. */
	if (__predict_false(lavg_count == 0)) {
		int i;
		CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg));
		for (i = 0; i < __arraycount(cexp); i++) {
			avg->ldavg[i] = (cexp[i] * avg->ldavg[i] +
			    nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT;
		}
	}

	/* Lightning bolt. */
	cv_broadcast(&lbolt);
}
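
/*
 * Added commentary (not part of the original file): in fixed point with
 * scale FSCALE, the per-LWP estimator in sched_pstats() computes
 *
 *	l_pctcpu = ccpu * l_pctcpu + (1 - ccpu) * (l_cpticks / clkhz)
 *
 * an exponentially weighted average of the fraction of statclock ticks the
 * LWP consumed over the last second, and the load average update is
 *
 *	ldavg[i] = cexp[i] * ldavg[i] + (1 - cexp[i]) * nrun
 *
 * where nrun counts runnable LWPs (plus those in short uninterruptible
 * sleeps) sampled every 5 seconds.  Consumers such as ps(1) and uptime(1)
 * divide the stored values by FSCALE to recover percentages and load
 * figures.
 */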