Lines Matching defs:ts

114 #define SKE_RUNQ_PCPU(ts)						\
115 ((ts)->ts_runq != 0 && (ts)->ts_runq != &runq)
466 struct td_sched *ts;
478 ts = td_get_sched(td);
503 ts->ts_pctcpu = (ts->ts_pctcpu * ccpu) >> FSHIFT;
509 if (ts->ts_cpticks != 0) {
511 ts->ts_pctcpu += (realstathz == 100)
512 ? ((fixpt_t) ts->ts_cpticks) <<
514 100 * (((fixpt_t) ts->ts_cpticks)
517 ts->ts_pctcpu += ((FSCALE - ccpu) *
518 (ts->ts_cpticks *
521 ts->ts_cpticks = 0;
529 if (ts->ts_slptime > 1) {
541 ts->ts_slptime = 0;
543 ts->ts_slptime++;
544 if (ts->ts_slptime > 1) {
548 ts->ts_estcpu = decay_cpu(loadfac, ts->ts_estcpu);
579 struct td_sched *ts;
583 ts = td_get_sched(td);
585 if (ts->ts_slptime > 5 * loadfac)
586 ts->ts_estcpu = 0;
588 newcpu = ts->ts_estcpu;
589 ts->ts_slptime--; /* was incremented in schedcpu() */
590 while (newcpu && --ts->ts_slptime)
592 ts->ts_estcpu = newcpu;
726 struct td_sched *ts;
729 ts = td_get_sched(td);
731 ts->ts_cpticks++;
732 ts->ts_estcpu = ESTCPULIM(ts->ts_estcpu + 1);
733 if ((ts->ts_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
742 if (!TD_IS_IDLETHREAD(td) && --ts->ts_slice <= 0) {
743 ts->ts_slice = sched_slice;
814 struct td_sched *ts, *tsc;
822 ts = td_get_sched(childtd);
823 bzero(ts, sizeof(*ts));
825 ts->ts_estcpu = tsc->ts_estcpu;
826 ts->ts_flags |= (tsc->ts_flags & TSF_AFFINITY);
827 ts->ts_slice = 1;
1134 struct td_sched *ts;
1137 ts = td_get_sched(td);
1138 if (ts->ts_slptime > 1) {
1143 ts->ts_slptime = 0;
1144 ts->ts_slice = sched_slice;
1312 struct td_sched *ts;
1317 ts = td_get_sched(td);
1357 ts->ts_flags & TSF_AFFINITY)) {
1362 KASSERT(SKE_RUNQ_PCPU(ts),
1364 cpu = ts->ts_runq - &runq_pcpu[0];
1368 ts->ts_runq = &runq_pcpu[cpu];
1371 "sched_add: Put td_sched:%p(td:%p) on cpu%d runq", ts, td,
1375 "sched_add: adding td_sched:%p (td:%p) to gbl runq", ts,
1378 ts->ts_runq = &runq;
1383 runq_add(ts->ts_runq, td, flags);
1412 struct td_sched *ts;
1414 ts = td_get_sched(td);
1442 CTR2(KTR_RUNQ, "sched_add: adding td_sched:%p (td:%p) to runq", ts, td);
1443 ts->ts_runq = &runq;
1447 runq_add(ts->ts_runq, td, flags);
1458 struct td_sched *ts;
1460 ts = td_get_sched(td);
1474 if (ts->ts_runq != &runq)
1475 runq_length[ts->ts_runq - runq_pcpu]--;
1477 runq_remove(ts->ts_runq, td);
1561 struct td_sched *ts = td_get_sched(td);
1569 ts->ts_runq = &runq_pcpu[cpu];
1621 struct td_sched *ts;
1624 ts = td_get_sched(td);
1625 return (ts->ts_pctcpu);
1636 struct td_sched *ts;
1641 ts = td_get_sched(td);
1644 if (ts->ts_cpticks != 0) {
1647 ? ((fixpt_t) ts->ts_cpticks) <<
1649 100 * (((fixpt_t) ts->ts_cpticks)
1653 (ts->ts_cpticks *
1765 struct td_sched *ts;
1767 ts = td_get_sched(td);
1768 if (ts->ts_name[0] == '\0')
1769 snprintf(ts->ts_name, sizeof(ts->ts_name),
1771 return (ts->ts_name);
1781 struct td_sched *ts;
1783 ts = td_get_sched(td);
1784 ts->ts_name[0] = '\0';
1792 struct td_sched *ts;
1801 ts = td_get_sched(td);
1802 ts->ts_flags &= ~TSF_AFFINITY;
1805 ts->ts_flags |= TSF_AFFINITY;
1813 if (!(ts->ts_flags & TSF_AFFINITY))
1826 if (ts->ts_runq != &runq &&
1827 THREAD_CAN_SCHED(td, ts->ts_runq - runq_pcpu))
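
The matches above appear to come from FreeBSD's 4BSD scheduler (sys/kern/sched_4bsd.c), where ts is the per-thread scheduler state returned by td_get_sched(td). As a reading aid, the sketch below reconstructs the td_sched fields that these lines touch; it is assembled only from the members visible in the matches, so the exact types, member order, and any KTR-only conditionals in the real definition may differ, and the TS_NAME_LEN buffer size is assumed here.

/*
 * Hypothetical reconstruction of struct td_sched, based solely on the
 * fields referenced in the matching lines above; not the authoritative
 * definition.
 */
struct td_sched {
	fixpt_t		ts_pctcpu;	/* decayed %cpu, updated in the schedcpu() lines 503-521 */
	u_int		ts_estcpu;	/* estimated cpu usage, bumped at line 732 */
	int		ts_cpticks;	/* statclock ticks since the last decay pass, cleared at line 521 */
	int		ts_slptime;	/* seconds not running, aged at lines 529-548 and 585-590 */
	int		ts_slice;	/* remaining time slice, reset to sched_slice at lines 743 and 1144 */
	int		ts_flags;	/* TSF_AFFINITY and related flags (lines 826, 1802-1813) */
	struct runq	*ts_runq;	/* run queue the thread is on: &runq or &runq_pcpu[cpu] */
	char		ts_name[TS_NAME_LEN];	/* name built lazily in the lines around 1768-1771 */
};

Read against this sketch, SKE_RUNQ_PCPU(ts) (lines 114-115) simply tests whether ts_runq points at a per-CPU queue rather than the global runq, which is why the code at lines 1364 and 1475 can recover the CPU index as the pointer difference ts->ts_runq - runq_pcpu.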