Lines Matching +full:ideal +full:- +full:factor +full:- +full:value

1 /*-
2 * SPDX-License-Identifier: BSD-3-Clause
73 * the range 100-256 Hz (approximately).
76 min((e), INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - \
77 RQ_PPQ) + INVERSE_ESTCPU_WEIGHT - 1)
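
These fragments appear to come from FreeBSD's sys/kern/sched_4bsd.c. Lines 76-77 are the tail of the ESTCPULIM() clamp on ts_estcpu, which bounds how many priority levels accumulated CPU time can ever cost a thread. A stand-alone sketch of what the clamp works out to; the constant values below are assumptions (the usual ones from sys/priority.h, sys/runq.h and sched_4bsd.c), not taken from the lines shown:

#include <stdio.h>

/*
 * Assumed values for the kernel constants referenced by ESTCPULIM();
 * check them against the real headers before relying on the numbers.
 */
#define PRIO_MIN                (-20)
#define PRIO_MAX                20
#define RQ_PPQ                  4
#define NICE_WEIGHT             1
#define INVERSE_ESTCPU_WEIGHT   8

#define ESTCPU_MAX \
    (INVERSE_ESTCPU_WEIGHT * (NICE_WEIGHT * (PRIO_MAX - PRIO_MIN) - RQ_PPQ) + \
    INVERSE_ESTCPU_WEIGHT - 1)

int
main(void)
{
        /* 8 * (1 * 40 - 4) + 7 = 295, i.e. at most 36 priority levels. */
        printf("estcpu clamp = %d, max priority contribution = %d\n",
            ESTCPU_MAX, ESTCPU_MAX / INVERSE_ESTCPU_WEIGHT);
        return (0);
}
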
112 #define TSF_AFFINITY 0x0001 /* Has a non-"full" CPU set. */
115 ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
118 CPU_ISSET((cpu), &(td)->td_cpuset->cs_mask)
165 * Per-CPU run queues
200 if (error != 0 || req->newptr == NULL)
294 sched_tdcnt--;
307 if (td->td_priority < curthread->td_priority)
328 * - The kernel is in the throes of crashing (panicstr).
329 * - The current thread has a higher (numerically lower) or
332 * - The current thread has an inhibitor set or is in the process of
337 * - If the new thread's priority is not a realtime priority and
348 KASSERT((td->td_inhibitors == 0),
350 pri = td->td_priority;
351 cpri = ctd->td_priority;
361 ctd->td_owepreempt = 1;
378 * That is, the system wants to compute a value of decay such
402 * 1) Given factor ** (5 * loadavg) ~= .1, prove factor == b/(b+1)
408 * therefore exp(-1/b) =~ 1 - (1/b) = (b-1)/b.
410 * ln(1+x) = x - x**2/2 + x**3/3 - ... -1 < x < 1
411 * therefore ln(b/(b+1)) = ln(1 - 1/(b+1)) =~ -1/(b+1).
412 * ln(.1) =~ -2.30
415 * Solve (factor)**(power) =~ .1 given power (5*loadav):
416 * solving for factor,
417 * ln(factor) =~ (-2.30/5*loadav), or
418 * factor =~ exp(-1/((5/2.30)*loadav)) =~ exp(-1/(2*loadav)) =
419 * exp(-1/b) =~ (b-1)/b =~ b/(b+1). QED
422 * Solve (factor)**(power) =~ .1 given factor == (b/(b+1)):
424 * power*ln(b/(b+1)) =~ -2.30, or
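
The derivation sketched in the comment fragments above (lines 378-424) can be checked numerically outside the kernel. A small illustration, not part of the scheduler, that plugs factor = b/(b+1) with b = 2*loadav into factor**(5*loadav):

#include <math.h>
#include <stdio.h>

int
main(void)
{
        double loadavs[] = { 1.0, 2.0, 4.0, 8.0, 16.0 };
        int i;

        /*
         * b is the "loadfactor" (2 * loadav); the comment block claims
         * (b/(b+1)) ** (5*loadav) is roughly 0.1, i.e. about 90% of the
         * accumulated estcpu is forgotten within 5*loadav seconds.
         */
        for (i = 0; i < (int)(sizeof(loadavs) / sizeof(loadavs[0])); i++) {
                double b = 2.0 * loadavs[i];
                double factor = b / (b + 1.0);

                printf("loadav %4.1f: factor^(5*loadav) = %.3f\n",
                    loadavs[i], pow(factor, 5.0 * loadavs[i]));
        }
        return (0);
}

The printed values drift from about 0.13 at loadav 1 toward exp(-5/2) ~= 0.08 at high load, which is the loose sense in which the comment's "~= .1" holds.
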
437 static fixpt_t ccpu = 0.95122942450071400909 * FSCALE; /* exp(-1/20) */
439 "Decay factor used for updating %CPU");
442 * If `ccpu' is not equal to `exp(-1/20)' and you still want to use the
443 * faster/more-accurate formula, you'll have to estimate CCPU_SHIFT below
446 * To estimate CCPU_SHIFT for exp(-1/20), the following formula was used:
447 * 1 - exp(-1/20) ~= 0.0487 ~= 0.0488 == 1 (fixed pt, *11* bits).
449 * If you don't want to bother with the faster/more-accurate formula, you
450 * can set CCPU_SHIFT to (FSHIFT + 1) which will use a slower/less-accurate
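
A quick way to sanity-check the ccpu literal at line 437 and the 1 - exp(-1/20) figure used to pick CCPU_SHIFT. FSHIFT = 11 (so FSCALE = 2048) is an assumption here, matching the customary sys/param.h value:

#include <math.h>
#include <stdio.h>

#define FSHIFT  11                      /* assumed, as in sys/param.h */
#define FSCALE  (1 << FSHIFT)

int
main(void)
{
        double e = exp(-1.0 / 20.0);

        printf("exp(-1/20)              = %.20f\n", e);
        printf("as FSCALE fixed point   = %d\n",
            (int)(0.95122942450071400909 * FSCALE));
        printf("1 - exp(-1/20)          = %.4f\n", 1.0 - e);
        return (0);
}
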
457 * MP-safe, called without the Giant mutex.
472 if (p->p_state == PRS_NEW) {
491 td->td_flags &= ~TDF_DIDRUN;
495 } else if (td->td_flags & TDF_DIDRUN) {
497 td->td_flags &= ~TDF_DIDRUN;
503 ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
509 if (ts->ts_cpticks != 0) {
511 ts->ts_pctcpu += (realstathz == 100)
512 ? ((fixpt_t) ts->ts_cpticks) <<
513 (FSHIFT - CCPU_SHIFT) :
514 100 * (((fixpt_t) ts->ts_cpticks)
515 << (FSHIFT - CCPU_SHIFT)) / realstathz;
517 ts->ts_pctcpu += ((FSCALE - ccpu) *
518 (ts->ts_cpticks *
521 ts->ts_cpticks = 0;
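
Lines 503-521 are the per-second %CPU filter in schedcpu(): the old value is decayed by ccpu, then the statclock ticks collected since the last pass are folded back in. A user-space simulation of that filter for a thread that runs flat out for ten seconds and then sleeps; FSHIFT, CCPU_SHIFT and the stathz value are assumptions, and only the realstathz != 100 branch is exercised:

#include <stdint.h>
#include <stdio.h>

#define FSHIFT          11              /* assumed, as in sys/param.h */
#define FSCALE          (1 << FSHIFT)
#define CCPU_SHIFT      11              /* assumed, as in sched_4bsd.c */

int
main(void)
{
        const uint64_t ccpu = (uint64_t)(0.95122942450071400909 * FSCALE);
        const int realstathz = 127;     /* a typical stathz value */
        uint64_t pctcpu = 0;
        int sec;

        /* Ten seconds fully busy, then ten seconds idle; each iteration
         * stands in for one schedcpu() pass over a single thread. */
        for (sec = 1; sec <= 20; sec++) {
                int cpticks = (sec <= 10) ? realstathz : 0;

                pctcpu = (pctcpu * ccpu) >> FSHIFT;
                if (cpticks != 0)
                        pctcpu += 100 * (((uint64_t)cpticks) <<
                            (FSHIFT - CCPU_SHIFT)) / realstathz;
                printf("t=%2ds  %%CPU ~= %ju\n", sec,
                    (uintmax_t)(pctcpu * 100 / FSCALE));
        }
        return (0);
}

Run, this shows the %CPU figure rising toward 100% with a time constant of about 20 seconds (a bit under 40% after the first ten) and then halving roughly every 14 seconds once the thread goes idle, which is the behaviour the exp(-1/20) choice is tuned for.
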
529 if (ts->ts_slptime > 1) {
531 * In an ideal world, this should not
541 ts->ts_slptime = 0;
543 ts->ts_slptime++;
544 if (ts->ts_slptime > 1) {
548 ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu);
567 pause("-", hz);
585 if (ts->ts_slptime > 5 * loadfac)
586 ts->ts_estcpu = 0;
588 newcpu = ts->ts_estcpu;
589 ts->ts_slptime--; /* was incremented in schedcpu() */
590 while (newcpu && --ts->ts_slptime)
592 ts->ts_estcpu = newcpu;
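
Lines 585-592 are updatepri(), which catches a thread up on the estcpu decay it missed while asleep: sleepers past the 5 * loadfac cutoff get estcpu zeroed outright, everyone else gets one decay step per second slept. A sketch of that loop, assuming the loadfactor()/decay_cpu() macro shapes defined earlier in the file:

#include <stdint.h>
#include <stdio.h>

#define FSHIFT  11                      /* assumed, as in sys/param.h */
#define FSCALE  (1 << FSHIFT)

/* Assumed shapes of the fixed-point macros defined earlier in sched_4bsd.c. */
#define loadfactor(loadav)      (2 * (loadav))
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
        uint64_t loadav = 1 * FSCALE;   /* load average 1.0, FSCALE-scaled */
        uint64_t loadfac = loadfactor(loadav);
        uint64_t estcpu = 200;          /* estcpu when the thread went to sleep */
        int slept;

        /* One decay step per second spent asleep, as in updatepri(). */
        for (slept = 1; slept <= 8; slept++) {
                estcpu = decay_cpu(loadfac, estcpu);
                printf("asleep %ds: estcpu = %ju\n", slept, (uintmax_t)estcpu);
        }
        return (0);
}
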
606 if (td->td_pri_class != PRI_TIMESHARE)
609 td_get_sched(td)->ts_estcpu / INVERSE_ESTCPU_WEIGHT +
610 NICE_WEIGHT * (td->td_proc->p_nice - PRIO_MIN);
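
Lines 609-610 are the estcpu and nice terms of the timesharing user-priority formula in resetpriority(); the base term and the clamp to the timesharing range sit on lines the search did not match. A sketch of the mapping under assumed (hypothetical) constant values, so the absolute numbers are illustrative only:

#include <stdio.h>

/* Assumed values for constants not shown in this listing. */
#define PRIO_MIN                (-20)
#define PRI_MIN_TIMESHARE       120     /* base of the timesharing range */
#define PRI_MAX_TIMESHARE       223
#define NICE_WEIGHT             1
#define INVERSE_ESTCPU_WEIGHT   8

static int
user_pri(unsigned int estcpu, int nice)
{
        int pri;

        /*
         * The two terms shown at lines 609-610, added to the timesharing
         * base and clamped to the bottom of the timesharing range.
         */
        pri = PRI_MIN_TIMESHARE + estcpu / INVERSE_ESTCPU_WEIGHT +
            NICE_WEIGHT * (nice - PRIO_MIN);
        if (pri > PRI_MAX_TIMESHARE)
                pri = PRI_MAX_TIMESHARE;
        return (pri);
}

int
main(void)
{
        printf("estcpu   0, nice   0: pri %d\n", user_pri(0, 0));
        printf("estcpu 255, nice   0: pri %d\n", user_pri(255, 0));
        printf("estcpu 255, nice  20: pri %d\n", user_pri(255, 20));
        printf("estcpu   0, nice -20: pri %d\n", user_pri(0, -20));
        return (0);
}

Lower numbers run first, so a CPU hog at nice 20 ends up far behind a freshly woken, un-niced thread.
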
625 if (td->td_priority < PRI_MIN_TIMESHARE ||
626 td->td_priority > PRI_MAX_TIMESHARE)
632 sched_prio(td, td->td_user_pri);
662 * Very early in the boot some setup of scheduler-specific
675 td_get_sched(&thread0)->ts_slice = sched_slice;
706 "Interrupt thread preemptions due to time-sharing");
719 * favor processes which haven't run much recently, and to round-robin
731 ts->ts_cpticks++;
732 ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
733 if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
742 if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
743 ts->ts_slice = sched_slice;
749 if (PRI_BASE(td->td_pri_class) == PRI_ITHD) {
751 td->td_owepreempt = 1;
752 if (td->td_base_pri + RQ_PPQ < PRI_MAX_ITHD) {
754 sched_prio(td, td->td_base_pri + RQ_PPQ);
757 td->td_flags |= TDF_SLICEEND;
763 stat->oldidlecalls = stat->idlecalls;
764 stat->idlecalls = 0;
771 for ( ; cnt > 0; cnt--)
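
Lines 731-757 are the statclock-driven half of the feedback loop: sched_clock() charges one estcpu point per tick (clamped by ESTCPULIM()), refreshes the priority every INVERSE_ESTCPU_WEIGHT ticks, and flags slice expiry, while schedcpu() decays estcpu once a second. A sketch of where a CPU hog settles when the two pull against each other; the constants and macro shapes are assumptions carried over from the earlier sketches:

#include <stdint.h>
#include <stdio.h>

#define FSHIFT  11                      /* assumed, as in sys/param.h */
#define FSCALE  (1 << FSHIFT)

/* Assumed constants and macro shapes from elsewhere in sched_4bsd.c. */
#define INVERSE_ESTCPU_WEIGHT   8
#define ESTCPU_MAX              295     /* the ESTCPULIM() bound, assumed */
#define decay_cpu(loadfac, cpu) (((loadfac) * (cpu)) / ((loadfac) + FSCALE))

int
main(void)
{
        uint64_t loadfac = 2 * (1 * FSCALE);    /* loadfactor() at loadav 1.0 */
        int stathz = 127;                       /* assumed stathz */
        uint64_t estcpu = 0;
        int sec;

        /*
         * Each second the hog collects one estcpu point per statclock tick
         * (sched_clock), clamped as ESTCPULIM() would clamp it, and is then
         * decayed once by schedcpu().  The increments run into the clamp
         * within a few seconds, so estcpu and the resulting priority
         * penalty settle at steady values.
         */
        for (sec = 1; sec <= 12; sec++) {
                estcpu += stathz;
                if (estcpu > ESTCPU_MAX)
                        estcpu = ESTCPU_MAX;
                estcpu = decay_cpu(loadfac, estcpu);
                printf("t=%2ds  estcpu %3ju  priority penalty %2ju\n", sec,
                    (uintmax_t)estcpu,
                    (uintmax_t)(estcpu / INVERSE_ESTCPU_WEIGHT));
        }
        return (0);
}
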
783 "prio:%d", td->td_priority);
794 "prio:%d", child->td_priority);
796 td_get_sched(td)->ts_estcpu = ESTCPULIM(td_get_sched(td)->ts_estcpu +
797 td_get_sched(child)->ts_estcpu);
800 if ((child->td_flags & TDF_NOLOAD) == 0)
816 childtd->td_oncpu = NOCPU;
817 childtd->td_lastcpu = NOCPU;
818 childtd->td_lock = &sched_lock;
819 childtd->td_cpuset = cpuset_ref(td->td_cpuset);
820 childtd->td_domain.dr_policy = td->td_cpuset->cs_domain;
821 childtd->td_priority = childtd->td_base_pri;
825 ts->ts_estcpu = tsc->ts_estcpu;
826 ts->ts_flags |= (tsc->ts_flags & TSF_AFFINITY);
827 ts->ts_slice = 1;
836 p->p_nice = nice;
849 td->td_pri_class = class;
860 "prio:%d", td->td_priority, "new prio:%d", prio, KTR_ATTR_LINKED,
862 SDT_PROBE3(sched, , , change__pri, td, td->td_proc, prio);
863 if (td != curthread && prio > td->td_priority) {
865 "lend prio", "prio:%d", td->td_priority, "new prio:%d",
867 SDT_PROBE4(sched, , , lend__pri, td, td->td_proc, prio,
871 if (td->td_priority == prio)
873 td->td_priority = prio;
874 if (TD_ON_RUNQ(td) && td->td_rqindex != (prio / RQ_PPQ)) {
888 td->td_flags |= TDF_BORROWING;
905 if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
906 td->td_base_pri <= PRI_MAX_TIMESHARE)
907 base_pri = td->td_user_pri;
909 base_pri = td->td_base_pri;
911 td->td_flags &= ~TDF_BORROWING;
923 td->td_base_pri = prio;
929 if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
933 oldprio = td->td_priority;
948 MPASS(td->td_pri_class == PRI_ITHD);
949 td->td_base_ithread_pri = prio;
958 td->td_base_user_pri = prio;
959 if (td->td_lend_user_pri <= prio)
961 td->td_user_pri = prio;
969 td->td_lend_user_pri = prio;
970 td->td_user_pri = min(prio, td->td_base_user_pri);
971 if (td->td_priority > td->td_user_pri)
972 sched_prio(td, td->td_user_pri);
973 else if (td->td_priority != td->td_user_pri)
984 if (td->td_lend_user_pri == prio)
997 td->td_slptick = ticks;
998 td_get_sched(td)->ts_slptime = 0;
999 if (pri != 0 && PRI_BASE(td->td_pri_class) == PRI_TIMESHARE)
1014 td->td_lastcpu = td->td_oncpu;
1015 preempted = (td->td_flags & TDF_SLICEEND) == 0 &&
1017 td->td_flags &= ~TDF_SLICEEND;
1019 td->td_owepreempt = 0;
1020 td->td_oncpu = NOCPU;
1028 if (td->td_flags & TDF_IDLETD) {
1046 if (td->td_lock != &sched_lock) {
1052 if ((td->td_flags & TDF_NOLOAD) == 0)
1056 MPASS(newtd->td_lock == &sched_lock);
1061 "prio:%d", td->td_priority);
1064 "prio:%d", td->td_priority, "wmesg:\"%s\"", td->td_wmesg,
1065 "lockname:\"%s\"", td->td_lockname);
1070 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1074 SDT_PROBE2(sched, , , off__cpu, newtd, newtd->td_proc);
1110 if (PMC_PROC_IS_USING_PMCS(td->td_proc))
1114 td->td_lock = &sched_lock;
1119 "prio:%d", td->td_priority);
1122 if (td->td_flags & TDF_IDLETD)
1126 td->td_oncpu = PCPU_GET(cpuid);
1138 if (ts->ts_slptime > 1) {
1142 td->td_slptick = 0;
1143 ts->ts_slptime = 0;
1144 ts->ts_slice = sched_slice;
1150 if (PRI_BASE(td->td_pri_class) == PRI_ITHD &&
1151 td->td_base_pri != td->td_base_ithread_pri)
1152 sched_prio(td, td->td_base_ithread_pri);
1195 id = pc->pc_cpuid;
1197 pc->pc_curthread == pc->pc_idlethread) {
1230 id = pc->pc_cpuid;
1233 if (cpu_idle_wakeup(pc->pc_cpuid))
1259 cpri = pcpu->pc_curthread->td_priority;
1273 if (pcpu->pc_curthread->td_lock == &sched_lock) {
1274 ast_sched_locked(pcpu->pc_curthread, TDA_SCHED);
1288 if (td->td_lastcpu != NOCPU && THREAD_CAN_SCHED(td, td->td_lastcpu))
1289 best = td->td_lastcpu;
1319 KASSERT((td->td_inhibitors == 0),
1323 KASSERT(td->td_flags & TDF_INMEM,
1327 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1331 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1335 * Now that the thread is moving to the run-queue, set the lock
1338 if (td->td_lock != &sched_lock) {
1341 td->td_lock = &sched_lock;
1349 * a specific set of CPUs, queue the thread to a per-CPU run queue.
1353 * as per-CPU state may not be initialized yet and we may crash if we
1354 * try to access the per-CPU run queues.
1356 if (smp_started && (td->td_pinned != 0 || td->td_flags & TDF_BOUND ||
1357 ts->ts_flags & TSF_AFFINITY)) {
1358 if (td->td_pinned != 0)
1359 cpu = td->td_lastcpu;
1360 else if (td->td_flags & TDF_BOUND) {
1364 cpu = ts->ts_runq - &runq_pcpu[0];
1368 ts->ts_runq = &runq_pcpu[cpu];
1378 ts->ts_runq = &runq;
1381 if ((td->td_flags & TDF_NOLOAD) == 0)
1383 runq_add(ts->ts_runq, td, flags);
1389 kick_other_cpu(td->td_priority, cpu);
1416 KASSERT((td->td_inhibitors == 0),
1420 KASSERT(td->td_flags & TDF_INMEM,
1423 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1427 SDT_PROBE4(sched, , , enqueue, td, td->td_proc, NULL,
1431 * Now that the thread is moving to the run-queue, set the lock
1434 if (td->td_lock != &sched_lock) {
1437 td->td_lock = &sched_lock;
1443 ts->ts_runq = &runq;
1445 if ((td->td_flags & TDF_NOLOAD) == 0)
1447 runq_add(ts->ts_runq, td, flags);
1461 KASSERT(td->td_flags & TDF_INMEM,
1467 "prio:%d", td->td_priority, KTR_ATTR_LINKED,
1469 SDT_PROBE3(sched, , , dequeue, td, td->td_proc, NULL);
1471 if ((td->td_flags & TDF_NOLOAD) == 0)
1474 if (ts->ts_runq != &runq)
1475 runq_length[ts->ts_runq - runq_pcpu]--;
1477 runq_remove(ts->ts_runq, td);
1501 tdcpu->td_priority < td->td_priority)) {
1518 runq_length[PCPU_GET(cpuid)]--;
1521 td->td_flags |= TDF_DIDRUN;
1523 KASSERT(td->td_flags & TDF_INMEM,
1535 SDT_PROBE2(sched, , , surrender, td, td->td_proc);
1536 if (td->td_critnest > 1) {
1537 td->td_owepreempt = 1;
1552 td->td_priority = td->td_user_pri;
1553 td->td_base_pri = td->td_user_pri;
1567 td->td_flags |= TDF_BOUND;
1569 ts->ts_runq = &runq_pcpu[cpu];
1583 td->td_flags &= ~TDF_BOUND;
1590 return (td->td_flags & TDF_BOUND);
1625 return (ts->ts_pctcpu);
1644 if (ts->ts_cpticks != 0) {
1647 ? ((fixpt_t) ts->ts_cpticks) <<
1648 (FSHIFT - CCPU_SHIFT) :
1649 100 * (((fixpt_t) ts->ts_cpticks)
1650 << (FSHIFT - CCPU_SHIFT)) / realstathz;
1652 delta = ((FSCALE - ccpu) *
1653 (ts->ts_cpticks *
1666 return (td_get_sched(td)->ts_estcpu);
1683 cpu_idle(stat->idlecalls + stat->oldidlecalls > 64);
1684 stat->idlecalls++;
1697 KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
1733 MPASS(td->td_lock == &sched_lock);
1736 td->td_lastcpu = td->td_oncpu;
1737 td->td_oncpu = NOCPU;
1748 * non-nested critical section with sched_lock held but not recursed.
1750 td->td_oncpu = PCPU_GET(cpuid);
1757 "prio:%d", td->td_priority);
1768 if (ts->ts_name[0] == '\0')
1769 snprintf(ts->ts_name, sizeof(ts->ts_name),
1770 "%s tid %d", td->td_name, td->td_tid);
1771 return (ts->ts_name);
1773 return (td->td_name);
1784 ts->ts_name[0] = '\0';
1802 ts->ts_flags &= ~TSF_AFFINITY;
1805 ts->ts_flags |= TSF_AFFINITY;
1813 if (!(ts->ts_flags & TSF_AFFINITY))
1817 if (td->td_pinned != 0 || td->td_flags & TDF_BOUND)
1823 * If we are on a per-CPU runqueue that is in the set,
1826 if (ts->ts_runq != &runq &&
1827 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
1830 /* Put this thread on a valid per-CPU runqueue. */
1839 if (THREAD_CAN_SCHED(td, td->td_oncpu))