1 /* $NetBSD: kern_synch.c,v 1.349 2020/05/23 23:42:43 ad Exp $ */ 2 3 /*- 4 * Copyright (c) 1999, 2000, 2004, 2006, 2007, 2008, 2009, 2019, 2020 5 * The NetBSD Foundation, Inc. 6 * All rights reserved. 7 * 8 * This code is derived from software contributed to The NetBSD Foundation 9 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility, 10 * NASA Ames Research Center, by Charles M. Hannum, Andrew Doran and 11 * Daniel Sieger. 12 * 13 * Redistribution and use in source and binary forms, with or without 14 * modification, are permitted provided that the following conditions 15 * are met: 16 * 1. Redistributions of source code must retain the above copyright 17 * notice, this list of conditions and the following disclaimer. 18 * 2. Redistributions in binary form must reproduce the above copyright 19 * notice, this list of conditions and the following disclaimer in the 20 * documentation and/or other materials provided with the distribution. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 23 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 24 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 25 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 32 * POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /*- 36 * Copyright (c) 1982, 1986, 1990, 1991, 1993 37 * The Regents of the University of California. All rights reserved. 38 * (c) UNIX System Laboratories, Inc. 39 * All or some portions of this file are derived from material licensed 40 * to the University of California by American Telephone and Telegraph 41 * Co. or Unix System Laboratories, Inc. and are reproduced herein with 42 * the permission of UNIX System Laboratories, Inc. 43 * 44 * Redistribution and use in source and binary forms, with or without 45 * modification, are permitted provided that the following conditions 46 * are met: 47 * 1. Redistributions of source code must retain the above copyright 48 * notice, this list of conditions and the following disclaimer. 49 * 2. Redistributions in binary form must reproduce the above copyright 50 * notice, this list of conditions and the following disclaimer in the 51 * documentation and/or other materials provided with the distribution. 52 * 3. Neither the name of the University nor the names of its contributors 53 * may be used to endorse or promote products derived from this software 54 * without specific prior written permission. 55 * 56 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 57 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 58 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 59 * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 60 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 61 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 62 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 63 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 64 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 65 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 66 * SUCH DAMAGE. 67 * 68 * @(#)kern_synch.c 8.9 (Berkeley) 5/19/95 69 */ 70 71 #include <sys/cdefs.h> 72 __KERNEL_RCSID(0, "$NetBSD: kern_synch.c,v 1.349 2020/05/23 23:42:43 ad Exp $"); 73 74 #include "opt_kstack.h" 75 #include "opt_dtrace.h" 76 77 #define __MUTEX_PRIVATE 78 79 #include <sys/param.h> 80 #include <sys/systm.h> 81 #include <sys/proc.h> 82 #include <sys/kernel.h> 83 #include <sys/cpu.h> 84 #include <sys/pserialize.h> 85 #include <sys/resourcevar.h> 86 #include <sys/rwlock.h> 87 #include <sys/sched.h> 88 #include <sys/syscall_stats.h> 89 #include <sys/sleepq.h> 90 #include <sys/lockdebug.h> 91 #include <sys/evcnt.h> 92 #include <sys/intr.h> 93 #include <sys/lwpctl.h> 94 #include <sys/atomic.h> 95 #include <sys/syslog.h> 96 97 #include <uvm/uvm_extern.h> 98 99 #include <dev/lockstat.h> 100 101 #include <sys/dtrace_bsd.h> 102 int dtrace_vtime_active=0; 103 dtrace_vtime_switch_func_t dtrace_vtime_switch_func; 104 105 static void sched_unsleep(struct lwp *, bool); 106 static void sched_changepri(struct lwp *, pri_t); 107 static void sched_lendpri(struct lwp *, pri_t); 108 109 syncobj_t sleep_syncobj = { 110 .sobj_flag = SOBJ_SLEEPQ_SORTED, 111 .sobj_unsleep = sleepq_unsleep, 112 .sobj_changepri = sleepq_changepri, 113 .sobj_lendpri = sleepq_lendpri, 114 .sobj_owner = syncobj_noowner, 115 }; 116 117 syncobj_t sched_syncobj = { 118 .sobj_flag = SOBJ_SLEEPQ_SORTED, 119 .sobj_unsleep = sched_unsleep, 120 .sobj_changepri = sched_changepri, 121 .sobj_lendpri = sched_lendpri, 122 .sobj_owner = syncobj_noowner, 123 }; 124 125 syncobj_t kpause_syncobj = { 126 .sobj_flag = SOBJ_SLEEPQ_NULL, 127 .sobj_unsleep = sleepq_unsleep, 128 .sobj_changepri = sleepq_changepri, 129 .sobj_lendpri = sleepq_lendpri, 130 .sobj_owner = syncobj_noowner, 131 }; 132 133 /* "Lightning bolt": once a second sleep address. */ 134 kcondvar_t lbolt __cacheline_aligned; 135 136 u_int sched_pstats_ticks __cacheline_aligned; 137 138 /* Preemption event counters. */ 139 static struct evcnt kpreempt_ev_crit __cacheline_aligned; 140 static struct evcnt kpreempt_ev_klock __cacheline_aligned; 141 static struct evcnt kpreempt_ev_immed __cacheline_aligned; 142 143 void 144 synch_init(void) 145 { 146 147 cv_init(&lbolt, "lbolt"); 148 149 evcnt_attach_dynamic(&kpreempt_ev_crit, EVCNT_TYPE_MISC, NULL, 150 "kpreempt", "defer: critical section"); 151 evcnt_attach_dynamic(&kpreempt_ev_klock, EVCNT_TYPE_MISC, NULL, 152 "kpreempt", "defer: kernel_lock"); 153 evcnt_attach_dynamic(&kpreempt_ev_immed, EVCNT_TYPE_MISC, NULL, 154 "kpreempt", "immediate"); 155 } 156 157 /* 158 * OBSOLETE INTERFACE 159 * 160 * General sleep call. Suspends the current LWP until a wakeup is 161 * performed on the specified identifier. The LWP will then be made 162 * runnable with the specified priority. Sleeps at most timo/hz seconds (0 163 * means no timeout). If pri includes PCATCH flag, signals are checked 164 * before and after sleeping, else signals are not checked. Returns 0 if 165 * awakened, EWOULDBLOCK if the timeout expires. 
If PCATCH is set and a 166 * signal needs to be delivered, ERESTART is returned if the current system 167 * call should be restarted if possible, and EINTR is returned if the system 168 * call should be interrupted by the signal (return EINTR). 169 */ 170 int 171 tsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo) 172 { 173 struct lwp *l = curlwp; 174 sleepq_t *sq; 175 kmutex_t *mp; 176 bool catch_p; 177 178 KASSERT((l->l_pflag & LP_INTR) == 0); 179 KASSERT(ident != &lbolt); 180 181 if (sleepq_dontsleep(l)) { 182 (void)sleepq_abort(NULL, 0); 183 return 0; 184 } 185 186 l->l_kpriority = true; 187 catch_p = priority & PCATCH; 188 sq = sleeptab_lookup(&sleeptab, ident, &mp); 189 sleepq_enter(sq, l, mp); 190 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p); 191 return sleepq_block(timo, catch_p); 192 } 193 194 int 195 mtsleep(wchan_t ident, pri_t priority, const char *wmesg, int timo, 196 kmutex_t *mtx) 197 { 198 struct lwp *l = curlwp; 199 sleepq_t *sq; 200 kmutex_t *mp; 201 bool catch_p; 202 int error; 203 204 KASSERT((l->l_pflag & LP_INTR) == 0); 205 KASSERT(ident != &lbolt); 206 207 if (sleepq_dontsleep(l)) { 208 (void)sleepq_abort(mtx, (priority & PNORELOCK) != 0); 209 return 0; 210 } 211 212 l->l_kpriority = true; 213 catch_p = priority & PCATCH; 214 sq = sleeptab_lookup(&sleeptab, ident, &mp); 215 sleepq_enter(sq, l, mp); 216 sleepq_enqueue(sq, ident, wmesg, &sleep_syncobj, catch_p); 217 mutex_exit(mtx); 218 error = sleepq_block(timo, catch_p); 219 220 if ((priority & PNORELOCK) == 0) 221 mutex_enter(mtx); 222 223 return error; 224 } 225 226 /* 227 * General sleep call for situations where a wake-up is not expected. 228 */ 229 int 230 kpause(const char *wmesg, bool intr, int timo, kmutex_t *mtx) 231 { 232 struct lwp *l = curlwp; 233 int error; 234 235 KASSERT(!(timo == 0 && intr == false)); 236 237 if (sleepq_dontsleep(l)) 238 return sleepq_abort(NULL, 0); 239 240 if (mtx != NULL) 241 mutex_exit(mtx); 242 l->l_kpriority = true; 243 lwp_lock(l); 244 KERNEL_UNLOCK_ALL(NULL, &l->l_biglocks); 245 sleepq_enqueue(NULL, l, wmesg, &kpause_syncobj, intr); 246 error = sleepq_block(timo, intr); 247 if (mtx != NULL) 248 mutex_enter(mtx); 249 250 return error; 251 } 252 253 /* 254 * OBSOLETE INTERFACE 255 * 256 * Make all LWPs sleeping on the specified identifier runnable. 257 */ 258 void 259 wakeup(wchan_t ident) 260 { 261 sleepq_t *sq; 262 kmutex_t *mp; 263 264 if (__predict_false(cold)) 265 return; 266 267 sq = sleeptab_lookup(&sleeptab, ident, &mp); 268 sleepq_wake(sq, ident, (u_int)-1, mp); 269 } 270 271 /* 272 * General yield call. Puts the current LWP back on its run queue and 273 * performs a context switch. 274 */ 275 void 276 yield(void) 277 { 278 struct lwp *l = curlwp; 279 280 KERNEL_UNLOCK_ALL(l, &l->l_biglocks); 281 lwp_lock(l); 282 283 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 284 KASSERT(l->l_stat == LSONPROC); 285 286 /* Voluntary - ditch kpriority boost. */ 287 l->l_kpriority = false; 288 spc_lock(l->l_cpu); 289 mi_switch(l); 290 KERNEL_LOCK(l->l_biglocks, l); 291 } 292 293 /* 294 * General preemption call. Puts the current LWP back on its run queue 295 * and performs an involuntary context switch. Different from yield() 296 * in that: 297 * 298 * - It's counted differently (involuntary vs. voluntary). 299 * - Realtime threads go to the head of their runqueue vs. tail for yield(). 300 * - Priority boost is retained unless LWP has exceeded timeslice. 
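 *
 * Long-running kernel code normally does not call preempt() unconditionally;
 * the preempt_needed()/preempt_point() helpers below package the usual
 * pattern and only yield when a reschedule is actually pending.  An
 * illustrative sketch (have_more_work()/do_one_unit() are hypothetical):
 *
 *	while (have_more_work(arg)) {
 *		do_one_unit(arg);
 *		preempt_point();
 *	}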
301 */ 302 void 303 preempt(void) 304 { 305 struct lwp *l = curlwp; 306 307 KERNEL_UNLOCK_ALL(l, &l->l_biglocks); 308 lwp_lock(l); 309 310 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 311 KASSERT(l->l_stat == LSONPROC); 312 313 spc_lock(l->l_cpu); 314 /* Involuntary - keep kpriority boost unless a CPU hog. */ 315 if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD) != 0) { 316 l->l_kpriority = false; 317 } 318 l->l_pflag |= LP_PREEMPTING; 319 mi_switch(l); 320 KERNEL_LOCK(l->l_biglocks, l); 321 } 322 323 /* 324 * Return true if the current LWP should yield the processor. Intended to 325 * be used by long-running code in kernel. 326 */ 327 inline bool 328 preempt_needed(void) 329 { 330 lwp_t *l = curlwp; 331 int needed; 332 333 KPREEMPT_DISABLE(l); 334 needed = l->l_cpu->ci_want_resched; 335 KPREEMPT_ENABLE(l); 336 337 return (needed != 0); 338 } 339 340 /* 341 * A breathing point for long running code in kernel. 342 */ 343 void 344 preempt_point(void) 345 { 346 347 if (__predict_false(preempt_needed())) { 348 preempt(); 349 } 350 } 351 352 /* 353 * Handle a request made by another agent to preempt the current LWP 354 * in-kernel. Usually called when l_dopreempt may be non-zero. 355 * 356 * Character addresses for lockstat only. 357 */ 358 static char kpreempt_is_disabled; 359 static char kernel_lock_held; 360 static char is_softint_lwp; 361 static char spl_is_raised; 362 363 bool 364 kpreempt(uintptr_t where) 365 { 366 uintptr_t failed; 367 lwp_t *l; 368 int s, dop, lsflag; 369 370 l = curlwp; 371 failed = 0; 372 while ((dop = l->l_dopreempt) != 0) { 373 if (l->l_stat != LSONPROC) { 374 /* 375 * About to block (or die), let it happen. 376 * Doesn't really count as "preemption has 377 * been blocked", since we're going to 378 * context switch. 379 */ 380 atomic_swap_uint(&l->l_dopreempt, 0); 381 return true; 382 } 383 KASSERT((l->l_flag & LW_IDLE) == 0); 384 if (__predict_false(l->l_nopreempt != 0)) { 385 /* LWP holds preemption disabled, explicitly. */ 386 if ((dop & DOPREEMPT_COUNTED) == 0) { 387 kpreempt_ev_crit.ev_count++; 388 } 389 failed = (uintptr_t)&kpreempt_is_disabled; 390 break; 391 } 392 if (__predict_false((l->l_pflag & LP_INTR) != 0)) { 393 /* Can't preempt soft interrupts yet. */ 394 atomic_swap_uint(&l->l_dopreempt, 0); 395 failed = (uintptr_t)&is_softint_lwp; 396 break; 397 } 398 s = splsched(); 399 if (__predict_false(l->l_blcnt != 0 || 400 curcpu()->ci_biglock_wanted != NULL)) { 401 /* Hold or want kernel_lock, code is not MT safe. */ 402 splx(s); 403 if ((dop & DOPREEMPT_COUNTED) == 0) { 404 kpreempt_ev_klock.ev_count++; 405 } 406 failed = (uintptr_t)&kernel_lock_held; 407 break; 408 } 409 if (__predict_false(!cpu_kpreempt_enter(where, s))) { 410 /* 411 * It may be that the IPL is too high. 412 * kpreempt_enter() can schedule an 413 * interrupt to retry later. 414 */ 415 splx(s); 416 failed = (uintptr_t)&spl_is_raised; 417 break; 418 } 419 /* Do it! */ 420 if (__predict_true((dop & DOPREEMPT_COUNTED) == 0)) { 421 kpreempt_ev_immed.ev_count++; 422 } 423 lwp_lock(l); 424 /* Involuntary - keep kpriority boost. */ 425 l->l_pflag |= LP_PREEMPTING; 426 spc_lock(l->l_cpu); 427 mi_switch(l); 428 l->l_nopreempt++; 429 splx(s); 430 431 /* Take care of any MD cleanup. */ 432 cpu_kpreempt_exit(where); 433 l->l_nopreempt--; 434 } 435 436 if (__predict_true(!failed)) { 437 return false; 438 } 439 440 /* Record preemption failure for reporting via lockstat. 
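 * The lockstat event itself is raised later, from mi_switch(), as
 * LB_NOPREEMPT|LB_SPIN, using the failure reason and return address
 * recorded below.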
*/ 441 atomic_or_uint(&l->l_dopreempt, DOPREEMPT_COUNTED); 442 lsflag = 0; 443 LOCKSTAT_ENTER(lsflag); 444 if (__predict_false(lsflag)) { 445 if (where == 0) { 446 where = (uintptr_t)__builtin_return_address(0); 447 } 448 /* Preemption is on, might recurse, so make it atomic. */ 449 if (atomic_cas_ptr_ni((void *)&l->l_pfailaddr, NULL, 450 (void *)where) == NULL) { 451 LOCKSTAT_START_TIMER(lsflag, l->l_pfailtime); 452 l->l_pfaillock = failed; 453 } 454 } 455 LOCKSTAT_EXIT(lsflag); 456 return true; 457 } 458 459 /* 460 * Return true if preemption is explicitly disabled. 461 */ 462 bool 463 kpreempt_disabled(void) 464 { 465 const lwp_t *l = curlwp; 466 467 return l->l_nopreempt != 0 || l->l_stat == LSZOMB || 468 (l->l_flag & LW_IDLE) != 0 || (l->l_pflag & LP_INTR) != 0 || 469 cpu_kpreempt_disabled(); 470 } 471 472 /* 473 * Disable kernel preemption. 474 */ 475 void 476 kpreempt_disable(void) 477 { 478 479 KPREEMPT_DISABLE(curlwp); 480 } 481 482 /* 483 * Reenable kernel preemption. 484 */ 485 void 486 kpreempt_enable(void) 487 { 488 489 KPREEMPT_ENABLE(curlwp); 490 } 491 492 /* 493 * Compute the amount of time during which the current lwp was running. 494 * 495 * - update l_rtime unless it's an idle lwp. 496 */ 497 498 void 499 updatertime(lwp_t *l, const struct bintime *now) 500 { 501 502 if (__predict_false(l->l_flag & LW_IDLE)) 503 return; 504 505 /* rtime += now - stime */ 506 bintime_add(&l->l_rtime, now); 507 bintime_sub(&l->l_rtime, &l->l_stime); 508 } 509 510 /* 511 * Select next LWP from the current CPU to run.. 512 */ 513 static inline lwp_t * 514 nextlwp(struct cpu_info *ci, struct schedstate_percpu *spc) 515 { 516 lwp_t *newl; 517 518 /* 519 * Let sched_nextlwp() select the LWP to run the CPU next. 520 * If no LWP is runnable, select the idle LWP. 521 * 522 * On arrival here LWPs on a run queue are locked by spc_mutex which 523 * is currently held. Idle LWPs are always locked by spc_lwplock, 524 * which may or may not be held here. On exit from this code block, 525 * in all cases newl is locked by spc_lwplock. 526 */ 527 newl = sched_nextlwp(); 528 if (newl != NULL) { 529 sched_dequeue(newl); 530 KASSERT(lwp_locked(newl, spc->spc_mutex)); 531 KASSERT(newl->l_cpu == ci); 532 newl->l_stat = LSONPROC; 533 newl->l_pflag |= LP_RUNNING; 534 spc->spc_curpriority = lwp_eprio(newl); 535 spc->spc_flags &= ~(SPCF_SWITCHCLEAR | SPCF_IDLE); 536 lwp_setlock(newl, spc->spc_lwplock); 537 } else { 538 /* 539 * The idle LWP does not get set to LSONPROC, because 540 * otherwise it screws up the output from top(1) etc. 541 */ 542 newl = ci->ci_data.cpu_idlelwp; 543 newl->l_pflag |= LP_RUNNING; 544 spc->spc_curpriority = PRI_IDLE; 545 spc->spc_flags = (spc->spc_flags & ~SPCF_SWITCHCLEAR) | 546 SPCF_IDLE; 547 } 548 549 /* 550 * Only clear want_resched if there are no pending (slow) software 551 * interrupts. We can do this without an atomic, because no new 552 * LWPs can appear in the queue due to our hold on spc_mutex, and 553 * the update to ci_want_resched will become globally visible before 554 * the release of spc_mutex becomes globally visible. 555 */ 556 ci->ci_want_resched = ci->ci_data.cpu_softints; 557 558 return newl; 559 } 560 561 /* 562 * The machine independent parts of context switch. 563 * 564 * NOTE: l->l_cpu is not changed in this routine, because an LWP never 565 * changes its own l_cpu (that would screw up curcpu on many ports and could 566 * cause all kinds of other evil stuff). 
l_cpu is always changed by some 567 * other actor, when it's known the LWP is not running (the LP_RUNNING flag 568 * is checked under lock). 569 */ 570 void 571 mi_switch(lwp_t *l) 572 { 573 struct cpu_info *ci; 574 struct schedstate_percpu *spc; 575 struct lwp *newl; 576 kmutex_t *lock; 577 int oldspl; 578 struct bintime bt; 579 bool returning; 580 581 KASSERT(lwp_locked(l, NULL)); 582 KASSERT(kpreempt_disabled()); 583 KASSERT(mutex_owned(curcpu()->ci_schedstate.spc_mutex)); 584 KASSERTMSG(l->l_blcnt == 0, "kernel_lock leaked"); 585 586 kstack_check_magic(l); 587 588 binuptime(&bt); 589 590 KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp); 591 KASSERT((l->l_pflag & LP_RUNNING) != 0); 592 KASSERT(l->l_cpu == curcpu() || l->l_stat == LSRUN); 593 ci = curcpu(); 594 spc = &ci->ci_schedstate; 595 returning = false; 596 newl = NULL; 597 598 /* 599 * If we have been asked to switch to a specific LWP, then there 600 * is no need to inspect the run queues. If a soft interrupt is 601 * blocking, then return to the interrupted thread without adjusting 602 * VM context or its start time: neither have been changed in order 603 * to take the interrupt. 604 */ 605 if (l->l_switchto != NULL) { 606 if ((l->l_pflag & LP_INTR) != 0) { 607 returning = true; 608 softint_block(l); 609 if ((l->l_pflag & LP_TIMEINTR) != 0) 610 updatertime(l, &bt); 611 } 612 newl = l->l_switchto; 613 l->l_switchto = NULL; 614 } 615 #ifndef __HAVE_FAST_SOFTINTS 616 else if (ci->ci_data.cpu_softints != 0) { 617 /* There are pending soft interrupts, so pick one. */ 618 newl = softint_picklwp(); 619 newl->l_stat = LSONPROC; 620 newl->l_pflag |= LP_RUNNING; 621 } 622 #endif /* !__HAVE_FAST_SOFTINTS */ 623 624 /* 625 * If on the CPU and we have gotten this far, then we must yield. 626 */ 627 if (l->l_stat == LSONPROC && l != newl) { 628 KASSERT(lwp_locked(l, spc->spc_lwplock)); 629 KASSERT((l->l_flag & LW_IDLE) == 0); 630 l->l_stat = LSRUN; 631 lwp_setlock(l, spc->spc_mutex); 632 sched_enqueue(l); 633 sched_preempted(l); 634 635 /* 636 * Handle migration. Note that "migrating LWP" may 637 * be reset here, if interrupt/preemption happens 638 * early in idle LWP. 639 */ 640 if (l->l_target_cpu != NULL && (l->l_pflag & LP_BOUND) == 0) { 641 KASSERT((l->l_pflag & LP_INTR) == 0); 642 spc->spc_migrating = l; 643 } 644 } 645 646 /* Pick new LWP to run. */ 647 if (newl == NULL) { 648 newl = nextlwp(ci, spc); 649 } 650 651 /* Items that must be updated with the CPU locked. */ 652 if (!returning) { 653 /* Count time spent in current system call */ 654 SYSCALL_TIME_SLEEP(l); 655 656 updatertime(l, &bt); 657 658 /* Update the new LWP's start time. */ 659 newl->l_stime = bt; 660 661 /* 662 * ci_curlwp changes when a fast soft interrupt occurs. 663 * We use ci_onproc to keep track of which kernel or 664 * user thread is running 'underneath' the software 665 * interrupt. This is important for time accounting, 666 * itimers and forcing user threads to preempt (aston). 667 */ 668 ci->ci_onproc = newl; 669 } 670 671 /* 672 * Preemption related tasks. Must be done holding spc_mutex. Clear 673 * l_dopreempt without an atomic - it's only ever set non-zero by 674 * sched_resched_cpu() which also holds spc_mutex, and only ever 675 * cleared by the LWP itself (us) with atomics when not under lock. 
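 * (Compare kpreempt() above, which runs without spc_mutex held and
 * therefore clears l_dopreempt with atomic_swap_uint().)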
676 */ 677 l->l_dopreempt = 0; 678 if (__predict_false(l->l_pfailaddr != 0)) { 679 LOCKSTAT_FLAG(lsflag); 680 LOCKSTAT_ENTER(lsflag); 681 LOCKSTAT_STOP_TIMER(lsflag, l->l_pfailtime); 682 LOCKSTAT_EVENT_RA(lsflag, l->l_pfaillock, LB_NOPREEMPT|LB_SPIN, 683 1, l->l_pfailtime, l->l_pfailaddr); 684 LOCKSTAT_EXIT(lsflag); 685 l->l_pfailtime = 0; 686 l->l_pfaillock = 0; 687 l->l_pfailaddr = 0; 688 } 689 690 if (l != newl) { 691 struct lwp *prevlwp; 692 693 /* Release all locks, but leave the current LWP locked */ 694 if (l->l_mutex == spc->spc_mutex) { 695 /* 696 * Drop spc_lwplock, if the current LWP has been moved 697 * to the run queue (it is now locked by spc_mutex). 698 */ 699 mutex_spin_exit(spc->spc_lwplock); 700 } else { 701 /* 702 * Otherwise, drop the spc_mutex, we are done with the 703 * run queues. 704 */ 705 mutex_spin_exit(spc->spc_mutex); 706 } 707 708 /* We're down to only one lock, so do debug checks. */ 709 LOCKDEBUG_BARRIER(l->l_mutex, 1); 710 711 /* Count the context switch. */ 712 CPU_COUNT(CPU_COUNT_NSWTCH, 1); 713 l->l_ncsw++; 714 if ((l->l_pflag & LP_PREEMPTING) != 0) { 715 l->l_nivcsw++; 716 l->l_pflag &= ~LP_PREEMPTING; 717 } 718 719 /* 720 * Increase the count of spin-mutexes before the release 721 * of the last lock - we must remain at IPL_SCHED after 722 * releasing the lock. 723 */ 724 KASSERTMSG(ci->ci_mtx_count == -1, 725 "%s: cpu%u: ci_mtx_count (%d) != -1 " 726 "(block with spin-mutex held)", 727 __func__, cpu_index(ci), ci->ci_mtx_count); 728 oldspl = MUTEX_SPIN_OLDSPL(ci); 729 ci->ci_mtx_count = -2; 730 731 /* Update status for lwpctl, if present. */ 732 if (l->l_lwpctl != NULL) { 733 l->l_lwpctl->lc_curcpu = (l->l_stat == LSZOMB ? 734 LWPCTL_CPU_EXITED : LWPCTL_CPU_NONE); 735 } 736 737 /* 738 * If curlwp is a soft interrupt LWP, there's nobody on the 739 * other side to unlock - we're returning into an assembly 740 * trampoline. Unlock now. This is safe because this is a 741 * kernel LWP and is bound to current CPU: the worst anyone 742 * else will do to it, is to put it back onto this CPU's run 743 * queue (and the CPU is busy here right now!). 744 */ 745 if (returning) { 746 /* Keep IPL_SCHED after this; MD code will fix up. */ 747 l->l_pflag &= ~LP_RUNNING; 748 lwp_unlock(l); 749 } else { 750 /* A normal LWP: save old VM context. */ 751 pmap_deactivate(l); 752 } 753 754 /* 755 * If DTrace has set the active vtime enum to anything 756 * other than INACTIVE (0), then it should have set the 757 * function to call. 758 */ 759 if (__predict_false(dtrace_vtime_active)) { 760 (*dtrace_vtime_switch_func)(newl); 761 } 762 763 /* 764 * We must ensure not to come here from inside a read section. 765 */ 766 KASSERT(pserialize_not_in_read_section()); 767 768 /* Switch to the new LWP.. */ 769 #ifdef MULTIPROCESSOR 770 KASSERT(curlwp == ci->ci_curlwp); 771 #endif 772 KASSERTMSG(l == curlwp, "l %p curlwp %p", l, curlwp); 773 prevlwp = cpu_switchto(l, newl, returning); 774 ci = curcpu(); 775 #ifdef MULTIPROCESSOR 776 KASSERT(curlwp == ci->ci_curlwp); 777 #endif 778 KASSERTMSG(l == curlwp, "l %p curlwp %p prevlwp %p", 779 l, curlwp, prevlwp); 780 KASSERT(prevlwp != NULL); 781 KASSERT(l->l_cpu == ci); 782 KASSERT(ci->ci_mtx_count == -2); 783 784 /* 785 * Immediately mark the previous LWP as no longer running 786 * and unlock (to keep lock wait times short as possible). 787 * We'll still be at IPL_SCHED afterwards. If a zombie, 788 * don't touch after clearing LP_RUNNING as it could be 789 * reaped by another CPU. Issue a memory barrier to ensure 790 * this. 
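 * A zombie may be reaped by another CPU as soon as that CPU observes
 * LP_RUNNING go clear, so every store made on the LWP's behalf here must
 * be globally visible before the flag is cleared.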
791 */ 792 KASSERT((prevlwp->l_pflag & LP_RUNNING) != 0); 793 lock = prevlwp->l_mutex; 794 if (__predict_false(prevlwp->l_stat == LSZOMB)) { 795 membar_sync(); 796 } 797 prevlwp->l_pflag &= ~LP_RUNNING; 798 mutex_spin_exit(lock); 799 800 /* 801 * Switched away - we have new curlwp. 802 * Restore VM context and IPL. 803 */ 804 pmap_activate(l); 805 pcu_switchpoint(l); 806 807 /* Update status for lwpctl, if present. */ 808 if (l->l_lwpctl != NULL) { 809 l->l_lwpctl->lc_curcpu = (int)cpu_index(ci); 810 l->l_lwpctl->lc_pctr++; 811 } 812 813 /* 814 * Normalize the spin mutex count and restore the previous 815 * SPL. Note that, unless the caller disabled preemption, 816 * we can be preempted at any time after this splx(). 817 */ 818 KASSERT(l->l_cpu == ci); 819 KASSERT(ci->ci_mtx_count == -1); 820 ci->ci_mtx_count = 0; 821 splx(oldspl); 822 } else { 823 /* Nothing to do - just unlock and return. */ 824 mutex_spin_exit(spc->spc_mutex); 825 l->l_pflag &= ~LP_PREEMPTING; 826 lwp_unlock(l); 827 } 828 829 KASSERT(l == curlwp); 830 KASSERT(l->l_stat == LSONPROC || (l->l_flag & LW_IDLE) != 0); 831 832 SYSCALL_TIME_WAKEUP(l); 833 LOCKDEBUG_BARRIER(NULL, 1); 834 } 835 836 /* 837 * setrunnable: change LWP state to be runnable, placing it on the run queue. 838 * 839 * Call with the process and LWP locked. Will return with the LWP unlocked. 840 */ 841 void 842 setrunnable(struct lwp *l) 843 { 844 struct proc *p = l->l_proc; 845 struct cpu_info *ci; 846 kmutex_t *oldlock; 847 848 KASSERT((l->l_flag & LW_IDLE) == 0); 849 KASSERT((l->l_flag & LW_DBGSUSPEND) == 0); 850 KASSERT(mutex_owned(p->p_lock)); 851 KASSERT(lwp_locked(l, NULL)); 852 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex); 853 854 switch (l->l_stat) { 855 case LSSTOP: 856 /* 857 * If we're being traced (possibly because someone attached us 858 * while we were stopped), check for a signal from the debugger. 859 */ 860 if ((p->p_slflag & PSL_TRACED) != 0 && p->p_xsig != 0) 861 signotify(l); 862 p->p_nrlwps++; 863 break; 864 case LSSUSPENDED: 865 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 866 l->l_flag &= ~LW_WSUSPEND; 867 p->p_nrlwps++; 868 cv_broadcast(&p->p_lwpcv); 869 break; 870 case LSSLEEP: 871 KASSERT(l->l_wchan != NULL); 872 break; 873 case LSIDL: 874 KASSERT(lwp_locked(l, l->l_cpu->ci_schedstate.spc_lwplock)); 875 break; 876 default: 877 panic("setrunnable: lwp %p state was %d", l, l->l_stat); 878 } 879 880 /* 881 * If the LWP was sleeping, start it again. 882 */ 883 if (l->l_wchan != NULL) { 884 l->l_stat = LSSLEEP; 885 /* lwp_unsleep() will release the lock. */ 886 lwp_unsleep(l, true); 887 return; 888 } 889 890 /* 891 * If the LWP is still on the CPU, mark it as LSONPROC. It may be 892 * about to call mi_switch(), in which case it will yield. 893 */ 894 if ((l->l_pflag & LP_RUNNING) != 0) { 895 l->l_stat = LSONPROC; 896 l->l_slptime = 0; 897 lwp_unlock(l); 898 return; 899 } 900 901 /* 902 * Look for a CPU to run. 903 * Set the LWP runnable. 904 */ 905 ci = sched_takecpu(l); 906 l->l_cpu = ci; 907 spc_lock(ci); 908 oldlock = lwp_setlock(l, l->l_cpu->ci_schedstate.spc_mutex); 909 sched_setrunnable(l); 910 l->l_stat = LSRUN; 911 l->l_slptime = 0; 912 sched_enqueue(l); 913 sched_resched_lwp(l, true); 914 /* SPC & LWP now unlocked. */ 915 mutex_spin_exit(oldlock); 916 } 917 918 /* 919 * suspendsched: 920 * 921 * Convert all non-LW_SYSTEM LSSLEEP or LSRUN LWPs to LSSUSPENDED. 
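 *
 * Typically used when the system is going down for shutdown or reboot
 * (hence the LW_WREBOOT flag set on each LWP below).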
922 */ 923 void 924 suspendsched(void) 925 { 926 CPU_INFO_ITERATOR cii; 927 struct cpu_info *ci; 928 struct lwp *l; 929 struct proc *p; 930 931 /* 932 * We do this by process in order not to violate the locking rules. 933 */ 934 mutex_enter(&proc_lock); 935 PROCLIST_FOREACH(p, &allproc) { 936 mutex_enter(p->p_lock); 937 if ((p->p_flag & PK_SYSTEM) != 0) { 938 mutex_exit(p->p_lock); 939 continue; 940 } 941 942 if (p->p_stat != SSTOP) { 943 if (p->p_stat != SZOMB && p->p_stat != SDEAD) { 944 p->p_pptr->p_nstopchild++; 945 p->p_waited = 0; 946 } 947 p->p_stat = SSTOP; 948 } 949 950 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 951 if (l == curlwp) 952 continue; 953 954 lwp_lock(l); 955 956 /* 957 * Set LW_WREBOOT so that the LWP will suspend itself 958 * when it tries to return to user mode. We want to 959 * try to get as many LWPs as possible to 960 * the user / kernel boundary, so that they will 961 * release any locks that they hold. 962 */ 963 l->l_flag |= (LW_WREBOOT | LW_WSUSPEND); 964 965 if (l->l_stat == LSSLEEP && 966 (l->l_flag & LW_SINTR) != 0) { 967 /* setrunnable() will release the lock. */ 968 setrunnable(l); 969 continue; 970 } 971 972 lwp_unlock(l); 973 } 974 975 mutex_exit(p->p_lock); 976 } 977 mutex_exit(&proc_lock); 978 979 /* 980 * Kick all CPUs to make them preempt any LWPs running in user mode. 981 * They'll trap into the kernel and suspend themselves in userret(). 982 * 983 * Unusually, we don't hold any other scheduler object locked, which 984 * would keep preemption off for sched_resched_cpu(), so disable it 985 * explicitly. 986 */ 987 kpreempt_disable(); 988 for (CPU_INFO_FOREACH(cii, ci)) { 989 spc_lock(ci); 990 sched_resched_cpu(ci, PRI_KERNEL, true); 991 /* spc now unlocked */ 992 } 993 kpreempt_enable(); 994 } 995 996 /* 997 * sched_unsleep: 998 * 999 * This is called when the LWP has not been awoken normally but instead 1000 * interrupted: for example, if the sleep timed out. Because of this, 1001 * it's not a valid action for running or idle LWPs. 1002 */ 1003 static void 1004 sched_unsleep(struct lwp *l, bool cleanup) 1005 { 1006 1007 lwp_unlock(l); 1008 panic("sched_unsleep"); 1009 } 1010 1011 static void 1012 sched_changepri(struct lwp *l, pri_t pri) 1013 { 1014 struct schedstate_percpu *spc; 1015 struct cpu_info *ci; 1016 1017 KASSERT(lwp_locked(l, NULL)); 1018 1019 ci = l->l_cpu; 1020 spc = &ci->ci_schedstate; 1021 1022 if (l->l_stat == LSRUN) { 1023 KASSERT(lwp_locked(l, spc->spc_mutex)); 1024 sched_dequeue(l); 1025 l->l_priority = pri; 1026 sched_enqueue(l); 1027 sched_resched_lwp(l, false); 1028 } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) { 1029 /* On priority drop, only evict realtime LWPs. */ 1030 KASSERT(lwp_locked(l, spc->spc_lwplock)); 1031 l->l_priority = pri; 1032 spc_lock(ci); 1033 sched_resched_cpu(ci, spc->spc_maxpriority, true); 1034 /* spc now unlocked */ 1035 } else { 1036 l->l_priority = pri; 1037 } 1038 } 1039 1040 static void 1041 sched_lendpri(struct lwp *l, pri_t pri) 1042 { 1043 struct schedstate_percpu *spc; 1044 struct cpu_info *ci; 1045 1046 KASSERT(lwp_locked(l, NULL)); 1047 1048 ci = l->l_cpu; 1049 spc = &ci->ci_schedstate; 1050 1051 if (l->l_stat == LSRUN) { 1052 KASSERT(lwp_locked(l, spc->spc_mutex)); 1053 sched_dequeue(l); 1054 l->l_inheritedprio = pri; 1055 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio); 1056 sched_enqueue(l); 1057 sched_resched_lwp(l, false); 1058 } else if (l->l_stat == LSONPROC && l->l_class != SCHED_OTHER) { 1059 /* On priority drop, only evict realtime LWPs.
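 * Time-sharing LWPs are left to be preempted in due course; an immediate
 * resched is only forced for the real-time classes.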
*/ 1060 KASSERT(lwp_locked(l, spc->spc_lwplock)); 1061 l->l_inheritedprio = pri; 1062 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio); 1063 spc_lock(ci); 1064 sched_resched_cpu(ci, spc->spc_maxpriority, true); 1065 /* spc now unlocked */ 1066 } else { 1067 l->l_inheritedprio = pri; 1068 l->l_auxprio = MAX(l->l_inheritedprio, l->l_protectprio); 1069 } 1070 } 1071 1072 struct lwp * 1073 syncobj_noowner(wchan_t wchan) 1074 { 1075 1076 return NULL; 1077 } 1078 1079 /* Decay 95% of proc::p_pctcpu in 60 seconds, ccpu = exp(-1/20) */ 1080 const fixpt_t ccpu = 0.95122942450071400909 * FSCALE; 1081 1082 /* 1083 * Constants for averages over 1, 5 and 15 minutes when sampling at 1084 * 5 second intervals. 1085 */ 1086 static const fixpt_t cexp[ ] = { 1087 0.9200444146293232 * FSCALE, /* exp(-1/12) */ 1088 0.9834714538216174 * FSCALE, /* exp(-1/60) */ 1089 0.9944598480048967 * FSCALE, /* exp(-1/180) */ 1090 }; 1091 1092 /* 1093 * sched_pstats: 1094 * 1095 * => Update process statistics and check CPU resource allocation. 1096 * => Call scheduler-specific hook to eventually adjust LWP priorities. 1097 * => Compute load average of a quantity on 1, 5 and 15 minute intervals. 1098 */ 1099 void 1100 sched_pstats(void) 1101 { 1102 extern struct loadavg averunnable; 1103 struct loadavg *avg = &averunnable; 1104 const int clkhz = (stathz != 0 ? stathz : hz); 1105 static bool backwards = false; 1106 static u_int lavg_count = 0; 1107 struct proc *p; 1108 int nrun; 1109 1110 sched_pstats_ticks++; 1111 if (++lavg_count >= 5) { 1112 lavg_count = 0; 1113 nrun = 0; 1114 } 1115 mutex_enter(&proc_lock); 1116 PROCLIST_FOREACH(p, &allproc) { 1117 struct lwp *l; 1118 struct rlimit *rlim; 1119 time_t runtm; 1120 int sig; 1121 1122 /* Increment sleep time (if sleeping), ignore overflow. */ 1123 mutex_enter(p->p_lock); 1124 runtm = p->p_rtime.sec; 1125 LIST_FOREACH(l, &p->p_lwps, l_sibling) { 1126 fixpt_t lpctcpu; 1127 u_int lcpticks; 1128 1129 if (__predict_false((l->l_flag & LW_IDLE) != 0)) 1130 continue; 1131 lwp_lock(l); 1132 runtm += l->l_rtime.sec; 1133 l->l_swtime++; 1134 sched_lwp_stats(l); 1135 1136 /* For load average calculation. */ 1137 if (__predict_false(lavg_count == 0) && 1138 (l->l_flag & (LW_SINTR | LW_SYSTEM)) == 0) { 1139 switch (l->l_stat) { 1140 case LSSLEEP: 1141 if (l->l_slptime > 1) { 1142 break; 1143 } 1144 /* FALLTHROUGH */ 1145 case LSRUN: 1146 case LSONPROC: 1147 case LSIDL: 1148 nrun++; 1149 } 1150 } 1151 lwp_unlock(l); 1152 1153 l->l_pctcpu = (l->l_pctcpu * ccpu) >> FSHIFT; 1154 if (l->l_slptime != 0) 1155 continue; 1156 1157 lpctcpu = l->l_pctcpu; 1158 lcpticks = atomic_swap_uint(&l->l_cpticks, 0); 1159 lpctcpu += ((FSCALE - ccpu) * 1160 (lcpticks * FSCALE / clkhz)) >> FSHIFT; 1161 l->l_pctcpu = lpctcpu; 1162 } 1163 /* Calculating p_pctcpu only for ps(1) */ 1164 p->p_pctcpu = (p->p_pctcpu * ccpu) >> FSHIFT; 1165 1166 if (__predict_false(runtm < 0)) { 1167 if (!backwards) { 1168 backwards = true; 1169 printf("WARNING: negative runtime; " 1170 "monotonic clock has gone backwards\n"); 1171 } 1172 mutex_exit(p->p_lock); 1173 continue; 1174 } 1175 1176 /* 1177 * Check if the process exceeds its CPU resource allocation. 1178 * If over the hard limit, kill it with SIGKILL. 1179 * If over the soft limit, send SIGXCPU and raise 1180 * the soft limit a little. 
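 *
 * For example, with a soft limit of 60 CPU-seconds and a hard limit of 90:
 * the first pass to see 60 seconds of accumulated CPU time posts SIGXCPU
 * and raises the soft limit to 65, so the process keeps receiving SIGXCPU
 * roughly every 5 CPU-seconds until SIGKILL is posted at 90.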
1181 */ 1182 rlim = &p->p_rlimit[RLIMIT_CPU]; 1183 sig = 0; 1184 if (__predict_false(runtm >= rlim->rlim_cur)) { 1185 if (runtm >= rlim->rlim_max) { 1186 sig = SIGKILL; 1187 log(LOG_NOTICE, 1188 "pid %d, command %s, is killed: %s\n", 1189 p->p_pid, p->p_comm, "exceeded RLIMIT_CPU"); 1190 uprintf("pid %d, command %s, is killed: %s\n", 1191 p->p_pid, p->p_comm, "exceeded RLIMIT_CPU"); 1192 } else { 1193 sig = SIGXCPU; 1194 if (rlim->rlim_cur < rlim->rlim_max) 1195 rlim->rlim_cur += 5; 1196 } 1197 } 1198 mutex_exit(p->p_lock); 1199 if (__predict_false(sig)) { 1200 KASSERT((p->p_flag & PK_SYSTEM) == 0); 1201 psignal(p, sig); 1202 } 1203 } 1204 1205 /* Load average calculation. */ 1206 if (__predict_false(lavg_count == 0)) { 1207 int i; 1208 CTASSERT(__arraycount(cexp) == __arraycount(avg->ldavg)); 1209 for (i = 0; i < __arraycount(cexp); i++) { 1210 avg->ldavg[i] = (cexp[i] * avg->ldavg[i] + 1211 nrun * FSCALE * (FSCALE - cexp[i])) >> FSHIFT; 1212 } 1213 } 1214 1215 /* Lightning bolt. */ 1216 cv_broadcast(&lbolt); 1217 1218 mutex_exit(&proc_lock); 1219 } 1220
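/*
 * Illustrative sketch (editorial addition, not compiled): a typical
 * producer/consumer use of the tsleep()/mtsleep()/wakeup() interface
 * documented near the top of this file.  The example_* names, the "exwait"
 * wmesg and the one second timeout are hypothetical, and example_lock would
 * need to be mutex_init()'ed (e.g. MUTEX_DEFAULT, IPL_NONE) before use.
 */
#if 0
static kmutex_t example_lock;
static bool example_ready;

/* Wait, interruptibly and with a timeout, for example_ready to be set. */
static int
example_consume(void)
{
	int error = 0;

	mutex_enter(&example_lock);
	while (!example_ready && error == 0) {
		/*
		 * Sleep at most hz ticks (about one second); PCATCH makes
		 * the sleep interruptible by signals.  mtsleep() drops and
		 * re-takes example_lock around the sleep.
		 */
		error = mtsleep(&example_ready, PCATCH, "exwait", hz,
		    &example_lock);
	}
	if (example_ready) {
		example_ready = false;	/* consume the event */
		error = 0;
	}
	mutex_exit(&example_lock);

	/* 0 on success, EWOULDBLOCK on timeout, ERESTART/EINTR on signal. */
	return error;
}

/* Post the event and wake any LWP sleeping on it. */
static void
example_produce(void)
{
	mutex_enter(&example_lock);
	example_ready = true;
	mutex_exit(&example_lock);
	wakeup(&example_ready);
}
#endif	/* 0 */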