/*	$NetBSD: kern_synch.c,v 1.261 2009/03/28 21:43:16 rmind Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and
 * Daniel Sieger.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*-
 * Copyright (c) 1982, 1986, 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_synch.c	8.9 (Berkeley) 5/19/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.261 2009/03/28 21:43:16 rmind Exp $");

#include "opt_kstack.h"
#include "opt_perfctrs.h"
#include "opt_sa.h"

#define	__MUTEX_PRIVATE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#if defined(PERFCTRS)
#include <sys/pmc.h>
#endif
#include <sys/cpu.h>
#include <sys/resourcevar.h>
#include <sys/sched.h>
#include <sys/sa.h>
#include <sys/savar.h>
#include <sys/syscall_stats.h>
#include <sys/sleepq.h>
#include <sys/lockdebug.h>
#include <sys/evcnt.h>
#include <sys/intr.h>
#include <sys/lwpctl.h>
#include <sys/atomic.h>
#include <sys/simplelock.h>

#include <uvm/uvm_extern.h>

#include <dev/lockstat.h>

static u_int	sched_unsleep(struct lwp *, bool);
static void	sched_changepri(struct lwp *, pri_t);
static void	sched_lendpri(struct lwp *, pri_t);
static void	resched_cpu(struct lwp *);

syncobj_t sleep_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sleepq_unsleep,
	sleepq_changepri,
	sleepq_lendpri,
	syncobj_noowner,
};

syncobj_t sched_syncobj = {
	SOBJ_SLEEPQ_SORTED,
	sched_unsleep,
	sched_changepri,
	sched_lendpri,
	syncobj_noowner,
};

callout_t	sched_pstats_ch;
unsigned	sched_pstats_ticks;
kcondvar_t	lbolt;			/* once a second sleep address */

/* Preemption event counters */
static struct evcnt kpreempt_ev_crit;
static struct evcnt kpreempt_ev_klock;
static struct evcnt kpreempt_ev_immed;

/*
 * During autoconfiguration or after a panic, a sleep will simply lower the
 * priority briefly to allow interrupts, then return.  The priority to be
 * used (safepri) is machine-dependent, thus this value is initialized and
 * maintained in the machine-dependent layers.  This priority will typically
 * be 0, or the lowest priority that is safe for use on the interrupt stack;
 * it can be made higher to block network software interrupts after panics.
 */
int	safepri;

void
sched_init(void)
{

	cv_init(&lbolt, "lbolt");
	callout_init(&sched_pstats_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sched_pstats_ch, sched_pstats, NULL);

	evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "defer: critical section");
	evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "defer: kernel_lock");
	evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL,
	   "kpreempt", "immediate");

	sched_pstats(NULL);
}
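
/*
 * Usage note (an illustrative sketch, not part of the original file):
 * the lbolt condvar initialized above is broadcast once per second from
 * sched_pstats().  A caller that already holds a mutex can therefore
 * block until roughly the next second boundary; "mp" below stands for
 * any such caller-held kmutex_t:
 *
 *	mutex_enter(mp);
 *	cv_wait(&lbolt, mp);	// woken by the once-a-second broadcast
 *	mutex_exit(mp);
 */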

/*
 * OBSOLETE INTERFACE
 *
 * General sleep call.  Suspends the current LWP until a wakeup is
 * performed on the specified identifier.  The LWP will then be made
 * runnable with the specified priority.  Sleeps at most timo/hz seconds (0
 * means no timeout).  If priority includes the PCATCH flag, signals are
 * checked before and after sleeping, otherwise signals are not checked.
 * Returns 0 if awakened, EWOULDBLOCK if the timeout expires.  If PCATCH
 * is set and a signal needs to be delivered, ERESTART is returned if the
 * current system call should be restarted if possible, and EINTR is
 * returned if the system call should be interrupted by the signal.
 *
 * The interlock is held until we are on a sleep queue.  The interlock will
 * be locked before returning back to the caller unless the PNORELOCK flag
 * is specified, in which case the interlock will always be unlocked upon
 * return.
 */
int
ltsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	volatile struct simplelock *interlock)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(NULL, 0);
		if ((priority & PNORELOCK) != 0)
			simple_unlock(interlock);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);

	if (interlock != NULL) {
		KASSERT(simple_lock_held(interlock));
		simple_unlock(interlock);
	}

	error = sleepq_block(timo, priority & PCATCH);

	if (interlock != NULL && (priority & PNORELOCK) == 0)
		simple_lock(interlock);

	return error;
}

int
mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo,
	kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	sleepq_t *sq;
	kmutex_t *mp;
	int error;

	KASSERT((l->l_pflag & LP_INTR) == 0);

	if (sleepq_dontsleep(l)) {
		(void)sleepq_abort(mtx, (priority & PNORELOCK) != 0);
		return 0;
	}

	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj);
	mutex_exit(mtx);
	error = sleepq_block(timo, priority & PCATCH);

	if ((priority & PNORELOCK) == 0)
		mutex_enter(mtx);

	return error;
}
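
/*
 * Example of the mtsleep() pattern (a sketch only; "sc", SC_READY and
 * sc_lock are hypothetical driver state, not part of this file).  The
 * condition is re-tested in a loop because a wakeup does not guarantee
 * that the condition now holds:
 *
 *	mutex_enter(&sc->sc_lock);
 *	while ((sc->sc_flags & SC_READY) == 0) {
 *		error = mtsleep(&sc->sc_flags, PCATCH, "scwait", hz,
 *		    &sc->sc_lock);
 *		if (error != 0 && error != EWOULDBLOCK)
 *			break;
 *	}
 *	mutex_exit(&sc->sc_lock);
 */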

/*
 * General sleep call for situations where a wake-up is not expected.
 */
int
kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx)
{
	struct lwp *l = curlwp;
	kmutex_t *mp;
	sleepq_t *sq;
	int error;

	if (sleepq_dontsleep(l))
		return sleepq_abort(NULL, 0);

	if (mtx != NULL)
		mutex_exit(mtx);
	l->l_kpriority = true;
	sq = sleeptab_lookup(&sleeptab, l, &mp);
	sleepq_enter(sq, l, mp);
	sleepq_enqueue(sq, l, wmesg, &sleep_syncobj);
	error = sleepq_block(timo, intr);
	if (mtx != NULL)
		mutex_enter(mtx);

	return error;
}

#ifdef KERN_SA
/*
 * sa_awaken:
 *
 *	We believe this lwp is an SA lwp.  If it's yielding,
 *	let it know it needs to wake up.
 *
 *	We are called and exit with the lwp locked.  We are
 *	called in the middle of wakeup operations, so we must
 *	not touch the locks at all.
 */
void
sa_awaken(struct lwp *l)
{
	/* LOCK_ASSERT(lwp_locked(l, NULL)); */

	if (l == l->l_savp->savp_lwp && l->l_flag & LW_SA_YIELD)
		l->l_flag &= ~LW_SA_IDLE;
}
#endif /* KERN_SA */

/*
 * OBSOLETE INTERFACE
 *
 * Make all LWPs sleeping on the specified identifier runnable.
 */
void
wakeup(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, (u_int)-1, mp);
}

/*
 * OBSOLETE INTERFACE
 *
 * Make the highest priority LWP first in line on the specified
 * identifier runnable.
 */
void
wakeup_one(wchan_t ident)
{
	sleepq_t *sq;
	kmutex_t *mp;

	if (__predict_false(cold))
		return;

	sq = sleeptab_lookup(&sleeptab, ident, &mp);
	sleepq_wake(sq, ident, 1, mp);
}
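
/*
 * The wakeup side of the hypothetical mtsleep() example above would set
 * the condition and then make every LWP sleeping on the same identifier
 * runnable (again a sketch; the names are not part of this file):
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_flags |= SC_READY;
 *	wakeup(&sc->sc_flags);
 *	mutex_exit(&sc->sc_lock);
 */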

/*
 * General yield call.  Puts the current LWP back on its run queue and
 * performs a voluntary context switch.  Should only be called when the
 * current LWP explicitly requests it (eg sched_yield(2)).
 */
void
yield(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * General preemption call.  Puts the current LWP back on its run queue
 * and performs an involuntary context switch.
 */
void
preempt(void)
{
	struct lwp *l = curlwp;

	KERNEL_UNLOCK_ALL(l, &l->l_biglocks);
	lwp_lock(l);
	KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock));
	KASSERT(l->l_stat == LSONPROC);
	l->l_kpriority = false;
	l->l_nivcsw++;
	(void)mi_switch(l);
	KERNEL_LOCK(l->l_biglocks, l);
}

/*
 * Handle a request made by another agent to preempt the current LWP
 * in-kernel.  Usually called when l_dopreempt may be non-zero.
 *
 * Character addresses for lockstat only.
 */
static char	in_critical_section;
static char	kernel_lock_held;
static char	is_softint;

bool
kpreempt(uintptr_t where)
{
	uintptr_t failed;
	lwp_t *l;
	int s, dop;

	l = curlwp;
	failed = 0;
	while ((dop = l->l_dopreempt) != 0) {
		if (l->l_stat != LSONPROC) {
			/*
			 * About to block (or die), let it happen.
			 * Doesn't really count as "preemption has
			 * been blocked", since we're going to
			 * context switch.
			 */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false((l->l_flag & LW_IDLE) != 0)) {
			/* Can't preempt idle loop, don't count as failure. */
			l->l_dopreempt = 0;
			return true;
		}
		if (__predict_false(l->l_nopreempt != 0)) {
			/* LWP holds preemption disabled, explicitly. */
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_crit.ev_count++;
			}
			failed = (uintptr_t)&in_critical_section;
			break;
		}
		if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
			/* Can't preempt soft interrupts yet. */
			l->l_dopreempt = 0;
			failed = (uintptr_t)&is_softint;
			break;
		}
		s = splsched();
		if (__predict_false(l->l_blcnt != 0 ||
		    curcpu()->ci_biglock_wanted != NULL)) {
			/* Hold or want kernel_lock, code is not MT safe. */
			splx(s);
			if ((dop & DOPREEMPT_COUNTED) == 0) {
				kpreempt_ev_klock.ev_count++;
			}
			failed = (uintptr_t)&kernel_lock_held;
			break;
		}
		if (__predict_false(!cpu_kpreempt_enter(where, s))) {
			/*
			 * It may be that the IPL is too high.
			 * kpreempt_enter() can schedule an
			 * interrupt to retry later.
			 */
			splx(s);
			break;
		}
		/* Do it! */
		if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) {
			kpreempt_ev_immed.ev_count++;
		}
		lwp_lock(l);
		mi_switch(l);
		l->l_nopreempt++;
		splx(s);

		/* Take care of any MD cleanup. */
		cpu_kpreempt_exit(where);
		l->l_nopreempt--;
	}

	/* Record preemption failure for reporting via lockstat. */
	if (__predict_false(failed)) {
		int lsflag = 0;
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED);
		LOCKSTAT_ENTER(lsflag);
		/* Might recurse, make it atomic. */
		if (__predict_false(lsflag)) {
			if (where == 0) {
				where = (uintptr_t)__builtin_return_address(0);
			}
			if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr,
			    NULL, (void *)where) == NULL) {
				LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime);
				l->l_pfaillock = failed;
			}
		}
		LOCKSTAT_EXIT(lsflag);
	}

	return failed;
}

/*
 * Return true if preemption is explicitly disabled.
 */
bool
kpreempt_disabled(void)
{
	const lwp_t *l = curlwp;

	return l->l_nopreempt != 0 || l->l_stat == LSZOMB ||
	    (l->l_flag & LW_IDLE) != 0 || cpu_kpreempt_disabled();
}

/*
 * Disable kernel preemption.
 */
void
kpreempt_disable(void)
{

	KPREEMPT_DISABLE(curlwp);
}

/*
 * Reenable kernel preemption.
 */
void
kpreempt_enable(void)
{

	KPREEMPT_ENABLE(curlwp);
}
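
/*
 * Example: a code section that must not be preempted (say, because it
 * manipulates per-CPU state) can bracket itself with the functions
 * above.  A sketch only; "frobnicate_percpu_state" is a made-up
 * placeholder, not a real kernel function:
 *
 *	kpreempt_disable();
 *	frobnicate_percpu_state(curcpu());
 *	kpreempt_enable();
 */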

/*
 * Compute the amount of time during which the current lwp was running.
 *
 * - update l_rtime unless it's an idle lwp.
 */

void
updatertime(lwp_t *l, const struct bintime *now)
{

	if (__predict_false(l->l_flag & LW_IDLE))
		return;

	/* rtime += now - stime */
	bintime_add(&l->l_rtime, now);
	bintime_sub(&l->l_rtime, &l->l_stime);
}
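
/*
 * Worked example (not from the original file): a struct bintime holds
 * whole seconds plus a 64-bit binary fraction, so "rtime += now - stime"
 * is implemented as the add/subtract pair above.  If l_stime was 5.25s
 * and now is 7.75s, the pair leaves l_rtime increased by exactly 2.50s
 * of accumulated run time.
 */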

/*
 * Select the next LWP to run on the current CPU.
 */
static inline lwp_t *
nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc)
{
	lwp_t *newl;

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP lock.
	 */
	newl = sched_nextlwp();
	if (newl != NULL) {
		sched_dequeue(newl);
		KASSERT(lwp_locked(newl, spc->spc_mutex));
		newl->l_stat = LSONPROC;
		newl->l_cpu = ci;
		newl->l_pflag |= LP_RUNNING;
		lwp_setlock(newl, spc->spc_lwplock);
	} else {
		newl = ci->ci_data.cpu_idlelwp;
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}

	/*
	 * Only clear want_resched if there are no pending (slow)
	 * software interrupts.
	 */
	ci->ci_want_resched = ci->ci_data.cpu_softints;
	spc->spc_flags &= ~SPCF_SWITCHCLEAR;
	spc->spc_curpriority = lwp_eprio(newl);

	return newl;
}

/*
 * The machine independent parts of context switch.
 *
 * Returns 1 if another LWP was actually run.
 */
int
mi_switch(lwp_t *l)
{
	struct cpu_info *ci;
	struct schedstate_percpu *spc;
	struct lwp *newl;
	int retval, oldspl;
	struct bintime bt;
	bool returning;

	KASSERT(lwp_locked(l, NULL));
	KASSERT(kpreempt_disabled());
	LOCKDEBUG_BARRIER(l->l_mutex, 1);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	binuptime(&bt);

	KASSERT(l->l_cpu == curcpu());
	ci = l->l_cpu;
	spc = &ci->ci_schedstate;
	returning = false;
	newl = NULL;

	/*
	 * If we have been asked to switch to a specific LWP, then there
	 * is no need to inspect the run queues.  If a soft interrupt is
	 * blocking, then return to the interrupted thread without adjusting
	 * VM context or its start time: neither have been changed in order
	 * to take the interrupt.
	 */
	if (l->l_switchto != NULL) {
		if ((l->l_pflag & LP_INTR) != 0) {
			returning = true;
			softint_block(l);
			if ((l->l_pflag & LP_TIMEINTR) != 0)
				updatertime(l, &bt);
		}
		newl = l->l_switchto;
		l->l_switchto = NULL;
	}
#ifndef __HAVE_FAST_SOFTINTS
	else if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	}
#endif	/* !__HAVE_FAST_SOFTINTS */

	/* Count time spent in current system call */
	if (!returning) {
		SYSCALL_TIME_SLEEP(l);

		/*
		 * XXXSMP If we are using h/w performance counters,
		 * save context.
		 */
#if PERFCTRS
		if (PMC_ENABLED(l->l_proc)) {
			pmc_save_context(l->l_proc);
		}
#endif
		updatertime(l, &bt);
	}

	/* Lock the runqueue */
	KASSERT(l->l_stat != LSRUN);
	mutex_spin_enter(spc->spc_mutex);

	/*
	 * If on the CPU and we have gotten this far, then we must yield.
	 */
	if (l->l_stat == LSONPROC && l != newl) {
		KASSERT(lwp_locked(l, spc->spc_lwplock));
		if ((l->l_flag & LW_IDLE) == 0) {
			l->l_stat = LSRUN;
			lwp_setlock(l, spc->spc_mutex);
			sched_enqueue(l, true);
			/* Handle migration case */
			KASSERT(spc->spc_migrating == NULL);
			if (l->l_target_cpu != NULL) {
				spc->spc_migrating = l;
			}
		} else
			l->l_stat = LSIDL;
	}

	/* Pick new LWP to run. */
	if (newl == NULL) {
		newl = nextlwp(ci, spc);
	}

	/* Items that must be updated with the CPU locked. */
	if (!returning) {
		/* Update the new LWP's start time. */
		newl->l_stime = bt;

		/*
		 * ci_curlwp changes when a fast soft interrupt occurs.
		 * We use cpu_onproc to keep track of which kernel or
		 * user thread is running 'underneath' the software
		 * interrupt.  This is important for time accounting,
		 * itimers and forcing user threads to preempt (aston).
		 */
		ci->ci_data.cpu_onproc = newl;
	}

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);
	l->l_dopreempt = 0;
	if (__predict_false(l->l_pfailaddr != 0)) {
		LOCKSTAT_FLAG(lsflag);
		LOCKSTAT_ENTER(lsflag);
		LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime);
		LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN,
		    1, l->l_pfailtime, l->l_pfailaddr);
		LOCKSTAT_EXIT(lsflag);
		l->l_pfailtime = 0;
		l->l_pfaillock = 0;
		l->l_pfailaddr = 0;
	}

	if (l != newl) {
		struct lwp *prevlwp;

		/* Release all locks, but leave the current LWP locked */
		if (l->l_mutex == spc->spc_mutex) {
			/*
			 * Drop spc_lwplock, if the current LWP has been moved
			 * to the run queue (it is now locked by spc_mutex).
			 */
			mutex_spin_exit(spc->spc_lwplock);
		} else {
			/*
			 * Otherwise, drop the spc_mutex, we are done with the
			 * run queues.
			 */
			mutex_spin_exit(spc->spc_mutex);
		}

		/*
		 * Mark that context switch is going to be performed
		 * for this LWP, to protect it from being switched
		 * to on another CPU.
		 */
		KASSERT(l->l_ctxswtch == 0);
		l->l_ctxswtch = 1;
		l->l_ncsw++;
		l->l_pflag &= ~LP_RUNNING;

		/*
		 * Increase the count of spin-mutexes before the release
		 * of the last lock - we must remain at IPL_SCHED during
		 * the context switch.
		 */
		oldspl = MUTEX_SPIN_OLDSPL(ci);
		ci->ci_mtx_count--;
		lwp_unlock(l);

		/* Count the context switch on this CPU. */
		ci->ci_data.cpu_nswtch++;

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL)
			l->l_lwpctl->lc_curcpu = LWPCTL_CPU_NONE;

		/*
		 * Save old VM context, unless a soft interrupt
		 * handler is blocking.
		 */
		if (!returning)
			pmap_deactivate(l);

		/*
		 * We may need to spin-wait if 'newl' is still
		 * context switching on another CPU.
		 */
		if (__predict_false(newl->l_ctxswtch != 0)) {
			u_int count;
			count = SPINLOCK_BACKOFF_MIN;
			while (newl->l_ctxswtch)
				SPINLOCK_BACKOFF(count);
		}

		/* Switch to the new LWP. */
		prevlwp = cpu_switchto(l, newl, returning);
		ci = curcpu();

		/*
		 * Switched away - we have new curlwp.
		 * Restore VM context and IPL.
		 */
		pmap_activate(l);
		if (prevlwp != NULL) {
			/* Normalize the count of the spin-mutexes */
			ci->ci_mtx_count++;
			/* Unmark the state of context switch */
			membar_exit();
			prevlwp->l_ctxswtch = 0;
		}

		/* Update status for lwpctl, if present. */
		if (l->l_lwpctl != NULL) {
			l->l_lwpctl->lc_curcpu = (int)cpu_index(ci);
			l->l_lwpctl->lc_pctr++;
		}

		KASSERT(l->l_cpu == ci);
		splx(oldspl);
		retval = 1;
	} else {
		/* Nothing to do - just unlock and return. */
		mutex_spin_exit(spc->spc_mutex);
		lwp_unlock(l);
		retval = 0;
	}

	KASSERT(l == curlwp);
	KASSERT(l->l_stat == LSONPROC);

	/*
	 * XXXSMP If we are using h/w performance counters, restore context.
	 * XXXSMP preemption problem.
	 */
#if PERFCTRS
	if (PMC_ENABLED(l->l_proc)) {
		pmc_restore_context(l->l_proc);
	}
#endif
	SYSCALL_TIME_WAKEUP(l);
	LOCKDEBUG_BARRIER(NULL, 1);

	return retval;
}

/*
 * The machine independent parts of context switch to oblivion.
 * Does not return.  Call with the LWP unlocked.
 */
void
lwp_exit_switchaway(lwp_t *l)
{
	struct cpu_info *ci;
	struct lwp *newl;
	struct bintime bt;

	ci = l->l_cpu;

	KASSERT(kpreempt_disabled());
	KASSERT(l->l_stat == LSZOMB || l->l_stat == LSIDL);
	KASSERT(ci == curcpu());
	LOCKDEBUG_BARRIER(NULL, 0);

#ifdef KSTACK_CHECK_MAGIC
	kstack_check_magic(l);
#endif

	/* Count time spent in current system call */
	SYSCALL_TIME_SLEEP(l);
	binuptime(&bt);
	updatertime(l, &bt);

	/* Must stay at IPL_SCHED even after releasing run queue lock. */
	(void)splsched();

	/*
	 * Let sched_nextlwp() select the LWP to run on the CPU next.
	 * If no LWP is runnable, select the idle LWP.
	 *
	 * Note that spc_lwplock might not necessarily be held, and
	 * the new thread would be unlocked after setting the LWP lock.
	 */
	spc_lock(ci);
#ifndef __HAVE_FAST_SOFTINTS
	if (ci->ci_data.cpu_softints != 0) {
		/* There are pending soft interrupts, so pick one. */
		newl = softint_picklwp();
		newl->l_stat = LSONPROC;
		newl->l_pflag |= LP_RUNNING;
	} else
#endif	/* !__HAVE_FAST_SOFTINTS */
	{
		newl = nextlwp(ci, &ci->ci_schedstate);
	}

	/* Update the new LWP's start time. */
	newl->l_stime = bt;
	l->l_pflag &= ~LP_RUNNING;

	/*
	 * ci_curlwp changes when a fast soft interrupt occurs.
	 * We use cpu_onproc to keep track of which kernel or
	 * user thread is running 'underneath' the software
	 * interrupt.  This is important for time accounting,
	 * itimers and forcing user threads to preempt (aston).
	 */
	ci->ci_data.cpu_onproc = newl;

	/*
	 * Preemption related tasks.  Must be done with the current
	 * CPU locked.
	 */
	cpu_did_resched(l);

	/* Unlock the run queue. */
	spc_unlock(ci);

	/* Count the context switch on this CPU. */
	ci->ci_data.cpu_nswtch++;

	/* Update status for lwpctl, if present. */
	if (l->l_lwpctl != NULL)
		l->l_lwpctl->lc_curcpu = LWPCTL_CPU_EXITED;

	/*
	 * We may need to spin-wait if 'newl' is still
	 * context switching on another CPU.
	 */
	if (__predict_false(newl->l_ctxswtch != 0)) {
		u_int count;
		count = SPINLOCK_BACKOFF_MIN;
		while (newl->l_ctxswtch)
			SPINLOCK_BACKOFF(count);
	}

	/* Switch to the new LWP. */
	(void)cpu_switchto(NULL, newl, false);

	for (;;) continue;	/* XXX: convince gcc about "noreturn" */
	/* NOTREACHED */
}

/*
 * Change LWP state to be runnable, placing it on the run queue if it is
 * in memory, and awakening the swapper if it isn't in memory.
 *
 * Call with the process and LWP locked.  Will return with the LWP unlocked.
 */
void
setrunnable(struct lwp *l)
{
	struct proc *p = l->l_proc;
	struct cpu_info *ci;

	KASSERT((l->l_flag & LW_IDLE) == 0);
	KASSERT(mutex_owned(p->p_lock));
	KASSERT(lwp_locked(l, NULL));
	KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);

	switch (l->l_stat) {
	case LSSTOP:
		/*
		 * If we're being traced (possibly because someone attached us
		 * while we were stopped), check for a signal from the debugger.
		 */
		if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xstat != 0)
			signotify(l);
		p->p_nrlwps++;
		break;
	case LSSUSPENDED:
		l->l_flag &= ~LW_WSUSPEND;
		p->p_nrlwps++;
		cv_broadcast(&p->p_lwpcv);
		break;
	case LSSLEEP:
		KASSERT(l->l_wchan != NULL);
		break;
	default:
		panic("setrunnable: lwp %p state was %d", l, l->l_stat);
	}

#ifdef KERN_SA
	if (l->l_proc->p_sa)
		sa_awaken(l);
#endif /* KERN_SA */

	/*
	 * If the LWP was sleeping interruptibly, then it's OK to start it
	 * again.  If not, mark it as still sleeping.
	 */
	if (l->l_wchan != NULL) {
		l->l_stat = LSSLEEP;
		/* lwp_unsleep() will release the lock. */
		lwp_unsleep(l, true);
		return;
	}

	/*
	 * If the LWP is still on the CPU, mark it as LSONPROC.  It may be
	 * about to call mi_switch(), in which case it will yield.
	 */
	if ((l->l_pflag & LP_RUNNING) != 0) {
		l->l_stat = LSONPROC;
		l->l_slptime = 0;
		lwp_unlock(l);
		return;
	}

	/*
	 * Look for a CPU to run.
	 * Set the LWP runnable.
	 */
	ci = sched_takecpu(l);
	l->l_cpu = ci;
	spc_lock(ci);
	lwp_unlock_to(l, ci->ci_schedstate.spc_mutex);
	sched_setrunnable(l);
	l->l_stat = LSRUN;
	l->l_slptime = 0;

	/*
	 * If thread is swapped out - wake the swapper to bring it back in.
	 * Otherwise, enter it into a run queue.
	 */
	if (l->l_flag & LW_INMEM) {
		sched_enqueue(l, false);
		resched_cpu(l);
		lwp_unlock(l);
	} else {
		lwp_unlock(l);
		uvm_kick_scheduler();
	}
}

/*
 * suspendsched:
 *
 *	Convert all non-L_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED.
 */
void
suspendsched(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct lwp *l;
	struct proc *p;

	/*
	 * We do this by process in order not to violate the locking rules.
	 */
	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		if ((p->p_flag & PK_MARKER) != 0)
			continue;

		mutex_enter(p->p_lock);
		if ((p->p_flag & PK_SYSTEM) != 0) {
			mutex_exit(p->p_lock);
			continue;
		}

		p->p_stat = SSTOP;

		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (l == curlwp)
				continue;

			lwp_lock(l);

			/*
			 * Set L_WREBOOT so that the LWP will suspend itself
			 * when it tries to return to user mode.  We want to
			 * try to get as many LWPs as possible to
			 * the user / kernel boundary, so that they will
			 * release any locks that they hold.
			 */
			l->l_flag |= (LW_WREBOOT | LW_WSUSPEND);

			if (l->l_stat == LSSLEEP &&
			    (l->l_flag & LW_SINTR) != 0) {
				/* setrunnable() will release the lock. */
				setrunnable(l);
				continue;
			}

			lwp_unlock(l);
		}

		mutex_exit(p->p_lock);
	}
	mutex_exit(proc_lock);

	/*
	 * Kick all CPUs to make them preempt any LWPs running in user mode.
	 * They'll trap into the kernel and suspend themselves in userret().
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		spc_lock(ci);
		cpu_need_resched(ci, RESCHED_IMMED);
		spc_unlock(ci);
	}
}

/*
 * sched_unsleep:
 *
 *	This is called when the LWP has not been awoken normally but instead
 *	interrupted: for example, if the sleep timed out.  Because of this,
 *	it's not a valid action for running or idle LWPs.
 */
static u_int
sched_unsleep(struct lwp *l, bool cleanup)
{

	lwp_unlock(l);
	panic("sched_unsleep");
}

static void
resched_cpu(struct lwp *l)
{
	struct cpu_info *ci = l->l_cpu;

	KASSERT(lwp_locked(l, NULL));
	if (lwp_eprio(l) > ci->ci_schedstate.spc_curpriority)
		cpu_need_resched(ci, 0);
}

static void
sched_changepri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_priority = pri;
		sched_enqueue(l, false);
	} else {
		l->l_priority = pri;
	}
	resched_cpu(l);
}

static void
sched_lendpri(struct lwp *l, pri_t pri)
{

	KASSERT(lwp_locked(l, NULL));

	if (l->l_stat == LSRUN && (l->l_flag & LW_INMEM) != 0) {
		KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_mutex));
		sched_dequeue(l);
		l->l_inheritedprio = pri;
		sched_enqueue(l, false);
	} else {
		l->l_inheritedprio = pri;
	}
	resched_cpu(l);
}

struct lwp *
syncobj_noowner(wchan_t wchan)
{

	return NULL;
}

/* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */
const fixpt_t ccpu = 0.95122942450071400909 * FSCALE;
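
/*
 * A short check of the constant above (not in the original file):
 * sched_pstats() runs once per second, and each run multiplies p_pctcpu
 * by ccpu = exp(-1/20).  After 60 runs the surviving fraction is
 * exp(-60/20) = exp(-3) ~= 0.0498, i.e. roughly 95% of the value has
 * decayed over 60 seconds, as the comment claims.
 */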

/*
 * sched_pstats:
 *
 *	Update process statistics and check CPU resource allocation.
 *	Call scheduler-specific hook to eventually adjust process/LWP
 *	priorities.
 */
/* ARGSUSED */
void
sched_pstats(void *arg)
{
	const int clkhz = (stathz != 0 ? stathz : hz);
	static bool backwards;
	struct rlimit *rlim;
	struct lwp *l;
	struct proc *p;
	long runtm;
	fixpt_t lpctcpu;
	u_int lcpticks;
	int sig;

	sched_pstats_ticks++;

	mutex_enter(proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		if (__predict_false((p->p_flag & PK_MARKER) != 0))
			continue;

		/*
		 * Increment time in/out of memory and sleep
		 * time (if sleeping), ignore overflow.
		 */
		mutex_enter(p->p_lock);
		runtm = p->p_rtime.sec;
		LIST_FOREACH(l, &p->p_lwps, l_sibling) {
			if (__predict_false((l->l_flag & LW_IDLE) != 0))
				continue;
			lwp_lock(l);
			runtm += l->l_rtime.sec;
			l->l_swtime++;
			sched_lwp_stats(l);
			lwp_unlock(l);

			l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT;
			if (l->l_slptime != 0)
				continue;

			lpctcpu = l->l_pctcpu;
			lcpticks = atomic_swap_uint(&l->l_cpticks, 0);
			lpctcpu += ((FSCALE - ccpu) *
			    (lcpticks * FSCALE / clkhz)) >> FSHIFT;
			l->l_pctcpu = lpctcpu;
		}
		/* Calculating p_pctcpu only for ps(1) */
		p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT;

		/*
		 * Check if the process exceeds its CPU resource allocation.
		 * If over the hard limit, kill it with SIGKILL; if over the
		 * soft limit, send SIGXCPU and nudge the soft limit upward.
		 */
		rlim = &p->p_rlimit[RLIMIT_CPU];
		sig = 0;
		if (__predict_false(runtm >= rlim->rlim_cur)) {
			if (runtm >= rlim->rlim_max)
				sig = SIGKILL;
			else {
				sig = SIGXCPU;
				if (rlim->rlim_cur < rlim->rlim_max)
					rlim->rlim_cur += 5;
			}
		}
		mutex_exit(p->p_lock);
		if (__predict_false(runtm < 0)) {
			if (!backwards) {
				backwards = true;
				printf("WARNING: negative runtime; "
				    "monotonic clock has gone backwards\n");
			}
		} else if (__predict_false(sig)) {
			KASSERT((p->p_flag & PK_SYSTEM) == 0);
			psignal(p, sig);
		}
	}
	mutex_exit(proc_lock);
	uvm_meter();
	cv_wakeup(&lbolt);
	callout_schedule(&sched_pstats_ch, hz);
}