/*
 * Copyright (c) 1999 Peter Wemm <peter@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $DragonFly: src/sys/kern/usched_bsd4.c,v 1.26 2008/11/01 23:31:19 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/queue.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/resourcevar.h>
#include <sys/spinlock.h>
#include <machine/cpu.h>
#include <machine/smp.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

/*
 * Priorities.  Note that with 32 run queues per scheduler each queue
 * represents four priority levels.
 */

#define MAXPRI			128
#define PRIMASK			(MAXPRI - 1)
#define PRIBASE_REALTIME	0
#define PRIBASE_NORMAL		MAXPRI
#define PRIBASE_IDLE		(MAXPRI * 2)
#define PRIBASE_THREAD		(MAXPRI * 3)
#define PRIBASE_NULL		(MAXPRI * 4)

#define NQS	32			/* 32 run queues. */
#define PPQ	(MAXPRI / NQS)		/* priorities per queue */
#define PPQMASK	(PPQ - 1)

/*
 * NICEPPQ	- number of nice units per priority queue
 * ESTCPURAMP	- number of scheduler ticks for estcpu to switch queues
 *
 * ESTCPUPPQ	- number of estcpu units per priority queue
 * ESTCPUMAX	- number of estcpu units
 * ESTCPUINCR	- amount we have to increment p_estcpu per scheduling tick at
 *		  100% cpu.
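 *
 * Worked example (illustrative, using the defaults defined just below):
 * ESTCPUINCR is ESTCPUPPQ / ESTCPURAMP = 512 / 4 = 128, so a thread
 * running at 100% cpu gains 128 estcpu units per scheduler tick and
 * needs ESTCPURAMP (4) consecutive ticks to accumulate the ESTCPUPPQ
 * (512) units that move it down one priority queue.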
 */
#define NICEPPQ		2
#define ESTCPURAMP	4
#define ESTCPUPPQ	512
#define ESTCPUMAX	(ESTCPUPPQ * NQS)
#define ESTCPUINCR	(ESTCPUPPQ / ESTCPURAMP)
#define PRIO_RANGE	(PRIO_MAX - PRIO_MIN + 1)

#define ESTCPULIM(v)	min((v), ESTCPUMAX)

TAILQ_HEAD(rq, lwp);

#define lwp_priority	lwp_usdata.bsd4.priority
#define lwp_rqindex	lwp_usdata.bsd4.rqindex
#define lwp_origcpu	lwp_usdata.bsd4.origcpu
#define lwp_estcpu	lwp_usdata.bsd4.estcpu
#define lwp_rqtype	lwp_usdata.bsd4.rqtype

static void bsd4_acquire_curproc(struct lwp *lp);
static void bsd4_release_curproc(struct lwp *lp);
static void bsd4_select_curproc(globaldata_t gd);
static void bsd4_setrunqueue(struct lwp *lp);
static void bsd4_schedulerclock(struct lwp *lp, sysclock_t period,
				sysclock_t cpstamp);
static void bsd4_recalculate_estcpu(struct lwp *lp);
static void bsd4_resetpriority(struct lwp *lp);
static void bsd4_forking(struct lwp *plp, struct lwp *lp);
static void bsd4_exiting(struct lwp *plp, struct lwp *lp);
static void bsd4_yield(struct lwp *lp);

#ifdef SMP
static void need_user_resched_remote(void *dummy);
#endif
static struct lwp *chooseproc_locked(struct lwp *chklp);
static void bsd4_remrunqueue_locked(struct lwp *lp);
static void bsd4_setrunqueue_locked(struct lwp *lp);

struct usched usched_bsd4 = {
	{ NULL },
	"bsd4", "Original DragonFly Scheduler",
	NULL,			/* default registration */
	NULL,			/* default deregistration */
	bsd4_acquire_curproc,
	bsd4_release_curproc,
	bsd4_setrunqueue,
	bsd4_schedulerclock,
	bsd4_recalculate_estcpu,
	bsd4_resetpriority,
	bsd4_forking,
	bsd4_exiting,
	NULL,			/* setcpumask not supported */
	bsd4_yield
};

struct usched_bsd4_pcpu {
	struct thread	helper_thread;
	short		rrcount;
	short		upri;
	struct lwp	*uschedcp;
};

typedef struct usched_bsd4_pcpu	*bsd4_pcpu_t;

/*
 * We have NQS (32) run queues per scheduling class.  For the normal
 * class, there are 128 priorities scaled onto these 32 queues.  New
 * processes are added to the last entry in each queue, and processes
 * are selected for running by taking them from the head and maintaining
 * a simple FIFO arrangement.  Realtime and Idle priority processes have
 * an explicit 0-31 priority which maps directly onto their class queue
 * index.  When a queue has something in it, the corresponding bit is
 * set in the queuebits variable, allowing a single read to determine
 * the state of all 32 queues and then a ffs() to find the first busy
 * queue.
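 *
 * As a concrete sketch of the normal-class mapping (derived from the
 * macros above): a priority p in the 0-127 range lands on run queue
 * (p & PRIMASK) / PPQ, i.e. four priority levels per queue, and
 * chooseproc_locked() locates the best non-empty queue with a single
 * bsfl() on the queuebits word.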
 */
static struct rq bsd4_queues[NQS];
static struct rq bsd4_rtqueues[NQS];
static struct rq bsd4_idqueues[NQS];
static u_int32_t bsd4_queuebits;
static u_int32_t bsd4_rtqueuebits;
static u_int32_t bsd4_idqueuebits;
static cpumask_t bsd4_curprocmask = -1;	/* currently running a user process */
static cpumask_t bsd4_rdyprocmask;	/* ready to accept a user process */
static int	 bsd4_runqcount;
#ifdef SMP
static volatile int bsd4_scancpu;
#endif
static struct spinlock bsd4_spin;
static struct usched_bsd4_pcpu bsd4_pcpu[MAXCPU];

SYSCTL_INT(_debug, OID_AUTO, bsd4_runqcount, CTLFLAG_RD, &bsd4_runqcount, 0,
    "Number of runnable lwps on the run queues");
#ifdef INVARIANTS
static int usched_nonoptimal;
SYSCTL_INT(_debug, OID_AUTO, usched_nonoptimal, CTLFLAG_RW,
    &usched_nonoptimal, 0, "acquire_curproc() was not optimal");
static int usched_optimal;
SYSCTL_INT(_debug, OID_AUTO, usched_optimal, CTLFLAG_RW,
    &usched_optimal, 0, "acquire_curproc() was optimal");
#endif
static int usched_debug = -1;
SYSCTL_INT(_debug, OID_AUTO, scdebug, CTLFLAG_RW, &usched_debug, 0,
    "Print debug information for this pid");
#ifdef SMP
static int remote_resched_nonaffinity;
static int remote_resched_affinity;
static int choose_affinity;
SYSCTL_INT(_debug, OID_AUTO, remote_resched_nonaffinity, CTLFLAG_RD,
    &remote_resched_nonaffinity, 0, "Number of remote rescheds without affinity");
SYSCTL_INT(_debug, OID_AUTO, remote_resched_affinity, CTLFLAG_RD,
    &remote_resched_affinity, 0, "Number of remote rescheds with affinity");
SYSCTL_INT(_debug, OID_AUTO, choose_affinity, CTLFLAG_RD,
    &choose_affinity, 0, "chooseproc() was smart");
#endif

static int usched_bsd4_rrinterval = (ESTCPUFREQ + 9) / 10;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_rrinterval, CTLFLAG_RW,
    &usched_bsd4_rrinterval, 0, "");
static int usched_bsd4_decay = ESTCPUINCR / 2;
SYSCTL_INT(_kern, OID_AUTO, usched_bsd4_decay, CTLFLAG_RW,
    &usched_bsd4_decay, 0, "");

/*
 * Initialize the run queues at boot time.
 */
static void
rqinit(void *dummy)
{
	int i;

	spin_init(&bsd4_spin);
	for (i = 0; i < NQS; i++) {
		TAILQ_INIT(&bsd4_queues[i]);
		TAILQ_INIT(&bsd4_rtqueues[i]);
		TAILQ_INIT(&bsd4_idqueues[i]);
	}
	atomic_clear_cpumask(&bsd4_curprocmask, 1);
}
SYSINIT(runqueue, SI_BOOT2_USCHED, SI_ORDER_FIRST, rqinit, NULL)

/*
 * BSD4_ACQUIRE_CURPROC
 *
 * This function is called when the kernel intends to return to userland.
 * It is responsible for making the thread the current designated userland
 * thread for this cpu, blocking if necessary.
 *
 * The kernel has already depressed our LWKT priority so we must not switch
 * until we have either assigned or disposed of the thread.
 *
 * WARNING! THIS FUNCTION IS ALLOWED TO CAUSE THE CURRENT THREAD TO MIGRATE
 * TO ANOTHER CPU!  Because most of the kernel assumes that no migration will
 * occur, this function is called only under very controlled circumstances.
 *
 * MPSAFE
 */
static void
bsd4_acquire_curproc(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	struct lwp *olp;

	crit_enter();
	bsd4_recalculate_estcpu(lp);

	/*
	 * If a reschedule was requested give another thread the
	 * driver's seat.
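	 * (A user reschedule is typically requested when the round-robin
	 * quantum expires in bsd4_schedulerclock(), or when
	 * bsd4_resetpriority() finds a better-priority lwp for this cpu.)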
	 */
	if (user_resched_wanted()) {
		clear_user_resched();
		bsd4_release_curproc(lp);
	}

	/*
	 * Loop until we are the current user thread
	 */
	do {
		/*
		 * Reload after a switch or setrunqueue/switch possibly
		 * moved us to another cpu.
		 */
		/*clear_lwkt_resched();*/
		gd = mycpu;
		dd = &bsd4_pcpu[gd->gd_cpuid];

		/*
		 * Become the currently scheduled user thread for this cpu
		 * if we can do so trivially.
		 *
		 * We can steal another thread's current thread designation
		 * on this cpu since if we are running that other thread
		 * must not be, so we can safely deschedule it.
		 */
		if (dd->uschedcp == lp) {
			/*
			 * We are already the current lwp (hot path).
			 */
			dd->upri = lp->lwp_priority;
		} else if (dd->uschedcp == NULL) {
			/*
			 * We can trivially become the current lwp.
			 */
			atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
		} else if (dd->upri > lp->lwp_priority) {
			/*
			 * We can steal the current lwp designation from the
			 * olp that was previously assigned to this cpu.
			 */
			olp = dd->uschedcp;
			dd->uschedcp = lp;
			dd->upri = lp->lwp_priority;
			lwkt_deschedule(olp->lwp_thread);
			bsd4_setrunqueue(olp);
		} else {
			/*
			 * We cannot become the current lwp, place the lp
			 * on the bsd4 run-queue and deschedule ourselves.
			 */
			lwkt_deschedule(lp->lwp_thread);
			bsd4_setrunqueue(lp);
			lwkt_switch();
		}

		/*
		 * Other threads at our current user priority have already
		 * put in their bids, but we must run any kernel threads
		 * at higher priorities, and we could lose our bid to
		 * another thread trying to return to user mode in the
		 * process.
		 *
		 * If we lose our bid we will be descheduled and put on
		 * the run queue.  When we are reactivated we will have
		 * another chance.
		 */
		if (lwkt_resched_wanted() ||
		    lp->lwp_thread->td_fairq_accum < 0) {
			lwkt_switch();
		}
	} while (dd->uschedcp != lp);

	crit_exit();
	KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
}

/*
 * BSD4_RELEASE_CURPROC
 *
 * This routine detaches the current thread from the userland scheduler,
 * usually because the thread needs to run or block in the kernel (at
 * kernel priority) for a while.
 *
 * This routine is also responsible for selecting a new thread to
 * make the current thread.
 *
 * NOTE: This implementation differs from the dummy example in that
 * bsd4_select_curproc() is able to select the current process, whereas
 * dummy_select_curproc() is not able to select the current process.
 * This means we have to NULL out uschedcp.
 *
 * Additionally, note that we may already be on a run queue if releasing
 * via the lwkt_switch() in bsd4_setrunqueue().
 *
 * MPSAFE
 */
static void
bsd4_release_curproc(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	if (dd->uschedcp == lp) {
		crit_enter();
		KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
		dd->uschedcp = NULL;	/* don't let lp be selected */
		dd->upri = PRIBASE_NULL;
		atomic_clear_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
		bsd4_select_curproc(gd);
		crit_exit();
	}
}

/*
 * BSD4_SELECT_CURPROC
 *
 * Select a new current process for this cpu and clear any pending user
 * reschedule request.  The cpu currently has no current process.
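 *
 * (In practice this means: ask chooseproc_locked() for the best runnable
 * lwp, make it the designated uschedcp, and LWKT-schedule its thread; if
 * the run queues are empty the cpu simply keeps no current user process.)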
 *
 * This routine is also responsible for equal-priority round-robining,
 * typically triggered from bsd4_schedulerclock().  In the dummy example
 * scheduler all the 'user' threads are LWKT scheduled at once and it
 * just calls lwkt_switch().
 *
 * The calling process is not on the queue and cannot be selected.
 *
 * MPSAFE
 */
static
void
bsd4_select_curproc(globaldata_t gd)
{
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];
	struct lwp *nlp;
	int cpuid = gd->gd_cpuid;

	crit_enter_gd(gd);

	spin_lock(&bsd4_spin);
	if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
		atomic_set_cpumask(&bsd4_curprocmask, CPUMASK(cpuid));
		dd->upri = nlp->lwp_priority;
		dd->uschedcp = nlp;
		spin_unlock(&bsd4_spin);
#ifdef SMP
		lwkt_acquire(nlp->lwp_thread);
#endif
		lwkt_schedule(nlp->lwp_thread);
	} else {
		spin_unlock(&bsd4_spin);
	}
#if 0
	} else if (bsd4_runqcount && (bsd4_rdyprocmask & CPUMASK(cpuid))) {
		atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
		spin_unlock(&bsd4_spin);
		lwkt_schedule(&dd->helper_thread);
	} else {
		spin_unlock(&bsd4_spin);
	}
#endif
	crit_exit_gd(gd);
}

/*
 * BSD4_SETRUNQUEUE
 *
 * Place the specified lwp on the user scheduler's run queue.  This routine
 * must be called with the thread descheduled.  The lwp must be runnable.
 *
 * The thread may be the current thread as a special case.
 *
 * MPSAFE
 */
static void
bsd4_setrunqueue(struct lwp *lp)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
#ifdef SMP
	int cpuid;
	cpumask_t mask;
	cpumask_t tmpmask;
#endif

	/*
	 * First validate the process state relative to the current cpu.
	 * We don't need the spinlock for this, just a critical section.
	 * We are in control of the process.
	 */
	crit_enter();
	KASSERT(lp->lwp_stat == LSRUN, ("setrunqueue: lwp not LSRUN"));
	KASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0,
	    ("lwp %d/%d already on runq! flag %08x/%08x", lp->lwp_proc->p_pid,
	     lp->lwp_tid, lp->lwp_proc->p_flag, lp->lwp_flag));
	KKASSERT((lp->lwp_thread->td_flags & TDF_RUNQ) == 0);

	/*
	 * Note: gd and dd are relative to the target thread's last cpu,
	 * NOT our current cpu.
	 */
	gd = lp->lwp_thread->td_gd;
	dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * This process is not supposed to be scheduled anywhere or assigned
	 * as the current process anywhere.  Assert the condition.
	 */
	KKASSERT(dd->uschedcp != lp);

#ifndef SMP
	/*
	 * If we are not SMP we do not have a scheduler helper to kick
	 * and must directly activate the process if none are scheduled.
	 *
	 * This is really only an issue when bootstrapping init since
	 * the caller in all other cases will be a user process, and
	 * even if released (dd->uschedcp == NULL), that process will
	 * kickstart the scheduler when it returns to user mode from
	 * the kernel.
	 */
	if (dd->uschedcp == NULL) {
		atomic_set_cpumask(&bsd4_curprocmask, gd->gd_cpumask);
		dd->uschedcp = lp;
		dd->upri = lp->lwp_priority;
		lwkt_schedule(lp->lwp_thread);
		crit_exit();
		return;
	}
#endif

#ifdef SMP
	/*
	 * XXX fixme.  Could be part of a remrunqueue/setrunqueue
	 * operation when the priority is recalculated, so TDF_MIGRATING
	 * may already be set.
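	 *
	 * (lwkt_giveaway() drops this cpu's LWKT ownership of the thread so
	 * that whichever cpu ultimately dispatches it can lwkt_acquire() it;
	 * see bsd4_select_curproc() and sched_thread().)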
	 */
	if ((lp->lwp_thread->td_flags & TDF_MIGRATING) == 0)
		lwkt_giveaway(lp->lwp_thread);
#endif

	/*
	 * We lose control of lp the moment we release the spinlock after
	 * having placed lp on the queue.  i.e. another cpu could pick it
	 * up and it could exit, or its priority could be further adjusted,
	 * or something like that.
	 */
	spin_lock(&bsd4_spin);
	bsd4_setrunqueue_locked(lp);

#ifdef SMP
	/*
	 * Kick the scheduler helper on one of the other cpus
	 * and request a reschedule if appropriate.
	 *
	 * NOTE: We check all cpus whose rdyprocmask is set.  First we
	 *	 look for cpus without designated lps, then we look for
	 *	 cpus with designated lps with a worse priority than our
	 *	 process.
	 */
	++bsd4_scancpu;
	cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
	mask = ~bsd4_curprocmask & bsd4_rdyprocmask & lp->lwp_cpumask &
	       smp_active_mask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		gd = globaldata_find(cpuid);
		dd = &bsd4_pcpu[cpuid];

		if ((dd->upri & ~PPQMASK) >= (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}

	/*
	 * Then cpus which might have a currently running lp
	 */
	mask = bsd4_curprocmask & bsd4_rdyprocmask &
	       lp->lwp_cpumask & smp_active_mask;

	while (mask) {
		tmpmask = ~(CPUMASK(cpuid) - 1);
		if (mask & tmpmask)
			cpuid = BSFCPUMASK(mask & tmpmask);
		else
			cpuid = BSFCPUMASK(mask);
		gd = globaldata_find(cpuid);
		dd = &bsd4_pcpu[cpuid];

		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			goto found;
		mask &= ~CPUMASK(cpuid);
	}

	/*
	 * If we cannot find a suitable cpu we reload from bsd4_scancpu
	 * and round-robin.  Other cpus will pickup as they release their
	 * current lwps or become ready.
	 *
	 * We only kick the target helper thread in this case, we do not
	 * set the user resched flag because
	 */
	cpuid = (bsd4_scancpu & 0xFFFF) % ncpus;
	gd = globaldata_find(cpuid);
	dd = &bsd4_pcpu[cpuid];
found:
	if (gd == mycpu) {
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
			if (dd->uschedcp == NULL) {
				lwkt_schedule(&dd->helper_thread);
			} else {
				need_user_resched();
			}
		}
	} else {
		atomic_clear_cpumask(&bsd4_rdyprocmask, CPUMASK(cpuid));
		spin_unlock(&bsd4_spin);
		if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK))
			lwkt_send_ipiq(gd, need_user_resched_remote, NULL);
		else
			lwkt_schedule(&dd->helper_thread);
	}
#else
	/*
	 * Request a reschedule if appropriate.
	 */
	spin_unlock(&bsd4_spin);
	if ((dd->upri & ~PPQMASK) > (lp->lwp_priority & ~PPQMASK)) {
		need_user_resched();
	}
#endif
	crit_exit();
}

/*
 * This routine is called from a systimer IPI.  It MUST be MP-safe and
 * the BGL IS NOT HELD ON ENTRY.  This routine is called at ESTCPUFREQ on
 * each cpu.
 *
 * MPSAFE
 */
static
void
bsd4_schedulerclock(struct lwp *lp, sysclock_t period, sysclock_t cpstamp)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	/*
	 * Do we need to round-robin?  We round-robin 10 times a second.
	 * This should only occur for cpu-bound batch processes.
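	 * (Illustrative arithmetic: usched_bsd4_rrinterval defaults to
	 * (ESTCPUFREQ + 9) / 10, so e.g. an ESTCPUFREQ of 50 yields a
	 * 5-tick quantum; the +9 rounds up so the interval is never zero.)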
	 */
	if (++dd->rrcount >= usched_bsd4_rrinterval) {
		dd->rrcount = 0;
		need_user_resched();
	}

	/*
	 * As the process accumulates cpu time p_estcpu is bumped and may
	 * push the process into another scheduling queue.  It typically
	 * takes 4 ticks to bump the queue.
	 */
	lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);

	/*
	 * Reducing p_origcpu over time causes more of our estcpu to be
	 * returned to the parent when we exit.  This is a small tweak
	 * for the batch detection heuristic.
	 */
	if (lp->lwp_origcpu)
		--lp->lwp_origcpu;

	/*
	 * Spinlocks also hold a critical section so there should not be
	 * any active.
	 */
	KKASSERT(gd->gd_spinlocks_wr == 0);

	bsd4_resetpriority(lp);
#if 0
	/*
	 * if we can't call bsd4_resetpriority for some reason we must call
	 * need user_resched().
	 */
	need_user_resched();
#endif
}

/*
 * Called from acquire and from kern_synch's one-second timer (one of the
 * callout helper threads) with a critical section held.
 *
 * Decay p_estcpu based on the number of ticks we haven't been running
 * and our p_nice.  As the load increases each process observes a larger
 * number of idle ticks (because other processes are running in them).
 * This observation leads to a larger correction which tends to make the
 * system more 'batchy'.
 *
 * Note that no recalculation occurs for a process which sleeps and wakes
 * up in the same tick.  That is, a system doing thousands of context
 * switches per second will still only do serious estcpu calculations
 * ESTCPUFREQ times per second.
 *
 * MPSAFE
 */
static
void
bsd4_recalculate_estcpu(struct lwp *lp)
{
	globaldata_t gd = mycpu;
	sysclock_t cpbase;
	int loadfac;
	int ndecay;
	int nticks;
	int nleft;

	/*
	 * We have to subtract periodic to get the last schedclock
	 * timeout time, otherwise we would get the upcoming timeout.
	 * Keep in mind that a process can migrate between cpus and
	 * while the scheduler clock should be very close, boundary
	 * conditions could lead to a small negative delta.
	 */
	cpbase = gd->gd_schedclock.time - gd->gd_schedclock.periodic;

	if (lp->lwp_slptime > 1) {
		/*
		 * Too much time has passed, do a coarse correction.
		 */
		lp->lwp_estcpu = lp->lwp_estcpu >> 1;
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
	} else if (lp->lwp_cpbase != cpbase) {
		/*
		 * Adjust estcpu if we are in a different tick.  Don't waste
		 * time if we are in the same tick.
		 *
		 * First calculate the number of ticks in the measurement
		 * interval.  The nticks calculation can wind up 0 due to
		 * a bug in the handling of lwp_slptime (as yet not found),
		 * so make sure we do not get a divide by 0 panic.
		 */
		nticks = (cpbase - lp->lwp_cpbase) / gd->gd_schedclock.periodic;
		if (nticks <= 0)
			nticks = 1;
		updatepcpu(lp, lp->lwp_cpticks, nticks);

		if ((nleft = nticks - lp->lwp_cpticks) < 0)
			nleft = 0;
		if (usched_debug == lp->lwp_proc->p_pid) {
			kprintf("pid %d tid %d estcpu %d cpticks %d nticks %d nleft %d",
				lp->lwp_proc->p_pid, lp->lwp_tid, lp->lwp_estcpu,
				lp->lwp_cpticks, nticks, nleft);
		}

		/*
		 * Calculate a decay value based on ticks remaining scaled
		 * down by the instantaneous load and p_nice.
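		 *
		 * Worked example, assuming the defaults (usched_bsd4_decay
		 * == ESTCPUINCR / 2 == 64, p_nice 0) on a lightly loaded
		 * system where loadfac clamps to 2:
		 *
		 *	ndecay = nleft * 64 * 2 * 40 / (2 * 40) = nleft * 64
		 *
		 * i.e. estcpu decays 64 units per idle tick, half the 128
		 * units per tick charged while running at 100% cpu.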
		 */
		if ((loadfac = bsd4_runqcount) < 2)
			loadfac = 2;
		ndecay = nleft * usched_bsd4_decay * 2 *
			 (PRIO_MAX * 2 - lp->lwp_proc->p_nice) /
			 (loadfac * PRIO_MAX * 2);

		/*
		 * Adjust p_estcpu.  Handle a border case where batch jobs
		 * can get stalled long enough to decay to zero when they
		 * shouldn't.
		 */
		if (lp->lwp_estcpu > ndecay * 2)
			lp->lwp_estcpu -= ndecay;
		else
			lp->lwp_estcpu >>= 1;

		if (usched_debug == lp->lwp_proc->p_pid)
			kprintf(" ndecay %d estcpu %d\n", ndecay, lp->lwp_estcpu);
		bsd4_resetpriority(lp);
		lp->lwp_cpbase = cpbase;
		lp->lwp_cpticks = 0;
	}
}

/*
 * Compute the priority of a process when running in user mode.
 * Arrange to reschedule if the resulting priority is better
 * than that of the current process.
 *
 * This routine may be called with any process.
 *
 * This routine is called by fork1() for initial setup with the process
 * off the run queue, and also may be called normally with the process on or
 * off the run queue.
 *
 * MPSAFE
 */
static void
bsd4_resetpriority(struct lwp *lp)
{
	bsd4_pcpu_t dd;
	int newpriority;
	u_short newrqtype;
	int reschedcpu;

	/*
	 * Calculate the new priority and queue type
	 */
	crit_enter();
	spin_lock(&bsd4_spin);

	newrqtype = lp->lwp_rtprio.type;

	switch(newrqtype) {
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		newpriority = PRIBASE_REALTIME +
			      (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_NORMAL:
		newpriority = (lp->lwp_proc->p_nice - PRIO_MIN) * PPQ / NICEPPQ;
		newpriority += lp->lwp_estcpu * PPQ / ESTCPUPPQ;
		newpriority = newpriority * MAXPRI / (PRIO_RANGE * PPQ /
			      NICEPPQ + ESTCPUMAX * PPQ / ESTCPUPPQ);
		newpriority = PRIBASE_NORMAL + (newpriority & PRIMASK);
		break;
	case RTP_PRIO_IDLE:
		newpriority = PRIBASE_IDLE + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	case RTP_PRIO_THREAD:
		newpriority = PRIBASE_THREAD + (lp->lwp_rtprio.prio & PRIMASK);
		break;
	default:
		panic("Bad RTP_PRIO %d", newrqtype);
		/* NOT REACHED */
	}

	/*
	 * The newpriority incorporates the queue type so do a simple masked
	 * check to determine if the process has moved to another queue.  If
	 * it has, and it is currently on a run queue, then move it.
	 */
	if ((lp->lwp_priority ^ newpriority) & ~PPQMASK) {
		lp->lwp_priority = newpriority;
		if (lp->lwp_flag & LWP_ONRUNQ) {
			bsd4_remrunqueue_locked(lp);
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			bsd4_setrunqueue_locked(lp);
			reschedcpu = lp->lwp_thread->td_gd->gd_cpuid;
		} else {
			lp->lwp_rqtype = newrqtype;
			lp->lwp_rqindex = (newpriority & PRIMASK) / PPQ;
			reschedcpu = -1;
		}
	} else {
		lp->lwp_priority = newpriority;
		reschedcpu = -1;
	}

	/*
	 * Determine if we need to reschedule the target cpu.  This only
	 * occurs if the LWP is already on a scheduler queue, which means
	 * that idle cpu notification has already occurred.  At most we
	 * need only issue a need_user_resched() on the appropriate cpu.
	 *
	 * The LWP may be owned by a CPU different from the current one,
	 * in which case dd->uschedcp may be modified without an MP lock
	 * or a spinlock held.
	 * The worst that happens is that the code below causes a spurious
	 * need_user_resched() on the target CPU and dd->upri to be wrong
	 * for a short period of time, both of which are harmless.
	 */
	if (reschedcpu >= 0) {
		dd = &bsd4_pcpu[reschedcpu];
		if ((bsd4_rdyprocmask & CPUMASK(reschedcpu)) &&
		    (dd->upri & ~PRIMASK) > (lp->lwp_priority & ~PRIMASK)) {
#ifdef SMP
			if (reschedcpu == mycpu->gd_cpuid) {
				spin_unlock(&bsd4_spin);
				need_user_resched();
			} else {
				spin_unlock(&bsd4_spin);
				atomic_clear_cpumask(&bsd4_rdyprocmask,
						     CPUMASK(reschedcpu));
				lwkt_send_ipiq(lp->lwp_thread->td_gd,
					       need_user_resched_remote, NULL);
			}
#else
			spin_unlock(&bsd4_spin);
			need_user_resched();
#endif
		} else {
			spin_unlock(&bsd4_spin);
		}
	} else {
		spin_unlock(&bsd4_spin);
	}
	crit_exit();
}

/*
 * MPSAFE
 */
static
void
bsd4_yield(struct lwp *lp)
{
#if 0
	/* FUTURE (or something similar) */
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		lp->lwp_estcpu = ESTCPULIM(lp->lwp_estcpu + ESTCPUINCR);
		break;
	default:
		break;
	}
#endif
	need_user_resched();
}

/*
 * Called from fork1() when a new child process is being created.
 *
 * Give the child process an initial estcpu that is more batch than
 * its parent and dock the parent for the fork (but do not
 * reschedule the parent).  This comprises the main part of our batch
 * detection heuristic for both parallel forking and sequential execs.
 *
 * Interactive processes will decay the boosted estcpu quickly while batch
 * processes will tend to compound it.
 * XXX lwp should be "spawning" instead of "forking"
 *
 * MPSAFE
 */
static void
bsd4_forking(struct lwp *plp, struct lwp *lp)
{
	lp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
	lp->lwp_origcpu = lp->lwp_estcpu;
	plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + ESTCPUPPQ);
}

/*
 * Called when the parent reaps a child.  Propagate cpu use by the child
 * back to the parent.
 *
 * MPSAFE
 */
static void
bsd4_exiting(struct lwp *plp, struct lwp *lp)
{
	int delta;

	if (plp->lwp_proc->p_pid != 1) {
		delta = lp->lwp_estcpu - lp->lwp_origcpu;
		if (delta > 0)
			plp->lwp_estcpu = ESTCPULIM(plp->lwp_estcpu + delta);
	}
}


/*
 * chooseproc() is called when a cpu needs a user process to LWKT schedule,
 * it selects a user process and returns it.  If chklp is non-NULL and chklp
 * has a better or equal priority than the process that would otherwise be
 * chosen, NULL is returned.
 *
 * Until we fix the RUNQ code the chklp test has to be strict or we may
 * bounce between processes trying to acquire the current process designation.
 *
 * MPSAFE - must be called with bsd4_spin exclusive held.  The spinlock is
 *	    left intact through the entire routine.
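 *
 * Concretely: lower priority values are better, so the chklp test below
 * keeps the caller's current lwp (returns NULL) whenever
 * chklp->lwp_priority < lp->lwp_priority + PPQ, i.e. the queued candidate
 * must win by at least a full queue (PPQ) to displace it.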
 */
static
struct lwp *
chooseproc_locked(struct lwp *chklp)
{
	struct lwp *lp;
	struct rq *q;
	u_int32_t *which, *which2;
	u_int32_t pri;
	u_int32_t rtqbits;
	u_int32_t tsqbits;
	u_int32_t idqbits;
	cpumask_t cpumask;

	rtqbits = bsd4_rtqueuebits;
	tsqbits = bsd4_queuebits;
	idqbits = bsd4_idqueuebits;
	cpumask = mycpu->gd_cpumask;

#ifdef SMP
again:
#endif
	if (rtqbits) {
		pri = bsfl(rtqbits);
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		which2 = &rtqbits;
	} else if (tsqbits) {
		pri = bsfl(tsqbits);
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		which2 = &tsqbits;
	} else if (idqbits) {
		pri = bsfl(idqbits);
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		which2 = &idqbits;
	} else {
		return NULL;
	}
	lp = TAILQ_FIRST(q);
	KASSERT(lp, ("chooseproc: no lwp on busy queue"));

#ifdef SMP
	while ((lp->lwp_cpumask & cpumask) == 0) {
		lp = TAILQ_NEXT(lp, lwp_procq);
		if (lp == NULL) {
			*which2 &= ~(1 << pri);
			goto again;
		}
	}
#endif

	/*
	 * If the passed lwp <chklp> is reasonably close to the selected
	 * lwp <lp>, return NULL (indicating that <chklp> should be kept).
	 *
	 * Note that we must error on the side of <chklp> to avoid bouncing
	 * between threads in the acquire code.
	 */
	if (chklp) {
		if (chklp->lwp_priority < lp->lwp_priority + PPQ)
			return(NULL);
	}

#ifdef SMP
	/*
	 * If the chosen lwp does not reside on this cpu spend a few
	 * cycles looking for a better candidate at the same priority level.
	 * This is a fallback check, setrunqueue() tries to wakeup the
	 * correct cpu and is our front-line affinity.
	 */
	if (lp->lwp_thread->td_gd != mycpu &&
	    (chklp = TAILQ_NEXT(lp, lwp_procq)) != NULL
	) {
		if (chklp->lwp_thread->td_gd == mycpu) {
			++choose_affinity;
			lp = chklp;
		}
	}
#endif

	TAILQ_REMOVE(q, lp, lwp_procq);
	--bsd4_runqcount;
	if (TAILQ_EMPTY(q))
		*which &= ~(1 << pri);
	KASSERT((lp->lwp_flag & LWP_ONRUNQ) != 0, ("not on runq6!"));
	lp->lwp_flag &= ~LWP_ONRUNQ;
	return lp;
}

#ifdef SMP

static
void
need_user_resched_remote(void *dummy)
{
	globaldata_t gd = mycpu;
	bsd4_pcpu_t dd = &bsd4_pcpu[gd->gd_cpuid];

	need_user_resched();
	lwkt_schedule(&dd->helper_thread);
}

#endif

/*
 * bsd4_remrunqueue_locked() removes a given process from the run queue
 * that it is on, clearing the queue busy bit if it becomes empty.
 *
 * Note that the user process scheduler is different from the LWKT scheduler.
 * The user process scheduler only manages user processes but it uses LWKT
 * underneath, and a user process operating in the kernel will often be
 * 'released' from our management.
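 *
 * For example, removing the last lwp from bsd4_queues[5] clears bit 5
 * in bsd4_queuebits, so the bsfl() scan in chooseproc_locked() will
 * skip that queue from then on.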
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_remrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	u_int8_t pri;

	KKASSERT(lp->lwp_flag & LWP_ONRUNQ);
	lp->lwp_flag &= ~LWP_ONRUNQ;
	--bsd4_runqcount;
	KKASSERT(bsd4_runqcount >= 0);

	pri = lp->lwp_rqindex;
	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("remrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}
	TAILQ_REMOVE(q, lp, lwp_procq);
	if (TAILQ_EMPTY(q)) {
		KASSERT((*which & (1 << pri)) != 0,
			("remrunqueue: remove from empty queue"));
		*which &= ~(1 << pri);
	}
}

/*
 * bsd4_setrunqueue_locked()
 *
 * Add a process whose rqtype and rqindex had previously been calculated
 * onto the appropriate run queue.  The decision to reschedule a cpu is
 * left to the callers, bsd4_setrunqueue() and bsd4_resetpriority().
 *
 * NOTE: Lower priorities are better priorities.
 *
 * MPSAFE - bsd4_spin must be held exclusively on call
 */
static void
bsd4_setrunqueue_locked(struct lwp *lp)
{
	struct rq *q;
	u_int32_t *which;
	int pri;

	KKASSERT((lp->lwp_flag & LWP_ONRUNQ) == 0);
	lp->lwp_flag |= LWP_ONRUNQ;
	++bsd4_runqcount;

	pri = lp->lwp_rqindex;

	switch(lp->lwp_rqtype) {
	case RTP_PRIO_NORMAL:
		q = &bsd4_queues[pri];
		which = &bsd4_queuebits;
		break;
	case RTP_PRIO_REALTIME:
	case RTP_PRIO_FIFO:
		q = &bsd4_rtqueues[pri];
		which = &bsd4_rtqueuebits;
		break;
	case RTP_PRIO_IDLE:
		q = &bsd4_idqueues[pri];
		which = &bsd4_idqueuebits;
		break;
	default:
		panic("setrunqueue: invalid rtprio type");
		/* NOT REACHED */
	}

	/*
	 * Add to the correct queue and set the appropriate bit.  If no
	 * lower priority (i.e. better) processes are in the queue then
	 * we want a reschedule; the best cpu for the job is calculated
	 * by the caller.
	 *
	 * Always run reschedules on the LWP's original cpu.
	 */
	TAILQ_INSERT_TAIL(q, lp, lwp_procq);
	*which |= 1 << pri;
}

#ifdef SMP

/*
 * For SMP systems a user scheduler helper thread is created for each
 * cpu and is used to allow one cpu to wakeup another for the purposes of
 * scheduling userland threads from setrunqueue().
 *
 * UP systems do not need the helper since there is only one cpu.
 *
 * We can't use the idle thread for this because we might block.
 * Additionally, doing things this way allows us to HLT idle cpus
 * on MP systems.
 *
 * MPSAFE
 */
static void
sched_thread(void *dummy)
{
	globaldata_t gd;
	bsd4_pcpu_t dd;
	struct lwp *nlp;
	cpumask_t mask;
	int cpuid;
#ifdef SMP
	cpumask_t tmpmask;
	int tmpid;
#endif

	gd = mycpu;
	cpuid = gd->gd_cpuid;		/* doesn't change */
	mask = gd->gd_cpumask;		/* doesn't change */
	dd = &bsd4_pcpu[cpuid];

	/*
	 * Since we are woken up only when no user processes are scheduled
	 * on a cpu, we can run at an ultra low priority.
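	 * (TDPRI_USER_SCHEDULER sits just above the idle thread in the
	 * LWKT priority ranking, so the helper only wins the cpu when
	 * nothing more important wants to run.)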
	 */
	lwkt_setpri_self(TDPRI_USER_SCHEDULER);

	for (;;) {
		/*
		 * We use the LWKT deschedule-interlock trick to avoid racing
		 * bsd4_rdyprocmask.  This means we cannot block through to the
		 * manual lwkt_switch() call we make below.
		 */
		crit_enter_gd(gd);
		lwkt_deschedule_self(gd->gd_curthread);
		spin_lock(&bsd4_spin);
		atomic_set_cpumask(&bsd4_rdyprocmask, mask);

		clear_user_resched();	/* This satisfied the reschedule request */
		dd->rrcount = 0;	/* Reset the round-robin counter */

		if ((bsd4_curprocmask & mask) == 0) {
			/*
			 * No thread is currently scheduled.
			 */
			KKASSERT(dd->uschedcp == NULL);
			if ((nlp = chooseproc_locked(NULL)) != NULL) {
				atomic_set_cpumask(&bsd4_curprocmask, mask);
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				spin_unlock(&bsd4_spin);
			}
		} else if (bsd4_runqcount) {
			if ((nlp = chooseproc_locked(dd->uschedcp)) != NULL) {
				dd->upri = nlp->lwp_priority;
				dd->uschedcp = nlp;
				spin_unlock(&bsd4_spin);
				lwkt_acquire(nlp->lwp_thread);
				lwkt_schedule(nlp->lwp_thread);
			} else {
				/*
				 * CHAINING CONDITION TRAIN
				 *
				 * We could not deal with the scheduler wakeup
				 * request on this cpu, locate a ready scheduler
				 * with no current lp assignment and chain to it.
				 *
				 * This ensures that a wakeup race which fails due
				 * to priority test does not leave other unscheduled
				 * cpus idle when the runqueue is not empty.
				 */
				tmpmask = ~bsd4_curprocmask & bsd4_rdyprocmask &
					  smp_active_mask;
				if (tmpmask) {
					tmpid = BSFCPUMASK(tmpmask);
					atomic_clear_cpumask(&bsd4_rdyprocmask,
							     CPUMASK(tmpid));
					spin_unlock(&bsd4_spin);
					/*
					 * Chain to the target cpu's helper
					 * thread, not our own.
					 */
					lwkt_schedule(
					    &bsd4_pcpu[tmpid].helper_thread);
				} else {
					spin_unlock(&bsd4_spin);
				}
			}
		} else {
			/*
			 * The runq is empty.
			 */
			spin_unlock(&bsd4_spin);
		}
		crit_exit_gd(gd);
		lwkt_switch();
	}
}

/*
 * Setup our scheduler helpers.  Note that curprocmask bit 0 has already
 * been cleared by rqinit() and we should not mess with it further.
 */
static void
sched_thread_cpu_init(void)
{
	int i;

	if (bootverbose)
		kprintf("start scheduler helpers on cpus:");

	for (i = 0; i < ncpus; ++i) {
		bsd4_pcpu_t dd = &bsd4_pcpu[i];
		cpumask_t mask = CPUMASK(i);

		if ((mask & smp_active_mask) == 0)
			continue;

		if (bootverbose)
			kprintf(" %d", i);

		lwkt_create(sched_thread, NULL, NULL, &dd->helper_thread,
			    TDF_STOPREQ, i, "usched %d", i);

		/*
		 * Allow user scheduling on the target cpu.  cpu #0 has already
		 * been enabled in rqinit().
		 */
		if (i)
			atomic_clear_cpumask(&bsd4_curprocmask, mask);
		atomic_set_cpumask(&bsd4_rdyprocmask, mask);
		dd->upri = PRIBASE_NULL;
	}
	if (bootverbose)
		kprintf("\n");
}
SYSINIT(uschedtd, SI_BOOT2_USCHED, SI_ORDER_SECOND,
	sched_thread_cpu_init, NULL)

#endif