Lines Matching defs:spc
138 struct schedstate_percpu *spc;
143 spc = &ci->ci_schedstate;
144 spc->spc_nextpkg = ci;
146 if (spc->spc_lwplock == NULL) {
147 spc->spc_lwplock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
151 lwp0.l_mutex = spc->spc_lwplock;
153 if (spc->spc_mutex != NULL) {
159 size = roundup2(sizeof(spc->spc_queue[0]) * PRI_COUNT, coherency_unit) +
162 spc->spc_queue = (void *)roundup2((uintptr_t)p, coherency_unit);
165 spc->spc_mutex = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
167 TAILQ_INIT(&spc->spc_queue[i]);
174 sched_getrq(struct schedstate_percpu *spc, const pri_t prio)
178 return &spc->spc_queue[prio];
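The lines above (146-178) set up the per-CPU run queue: one cache-line-aligned array of TAILQ heads, one per priority level, so sched_getrq() is nothing more than an array index. A minimal userland sketch of that layout, using hypothetical stand-ins (PRI_COUNT_SKETCH, struct lwp_sketch, struct runq_sketch) rather than the kernel's real types:

    #include <sys/queue.h>

    #define PRI_COUNT_SKETCH 224              /* hypothetical stand-in for PRI_COUNT */

    struct lwp_sketch {                       /* hypothetical stand-in for struct lwp */
        int l_priority;
        TAILQ_ENTRY(lwp_sketch) l_runq;
    };
    TAILQ_HEAD(prique_sketch, lwp_sketch);

    struct runq_sketch {                      /* hypothetical slice of schedstate_percpu */
        struct prique_sketch spc_queue[PRI_COUNT_SKETCH];
    };

    /* Like the TAILQ_INIT() loop at line 167: every priority level starts empty. */
    static void
    runq_init_sketch(struct runq_sketch *rq)
    {
        for (int i = 0; i < PRI_COUNT_SKETCH; i++)
            TAILQ_INIT(&rq->spc_queue[i]);
    }

    /* Like sched_getrq() at line 174: the queue for a priority is an array index. */
    static struct prique_sketch *
    getrq_sketch(struct runq_sketch *rq, int prio)
    {
        return &rq->spc_queue[prio];
    }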
188 struct schedstate_percpu *spc;
194 spc = &ci->ci_schedstate;
198 q_head = sched_getrq(spc, eprio);
206 KASSERT((spc->spc_bitmap[i] & q) == 0);
207 spc->spc_bitmap[i] |= q;
237 spc->spc_flags &= ~SPCF_IDLE;
238 spc->spc_count++;
240 atomic_store_relaxed(&spc->spc_mcount,
241 atomic_load_relaxed(&spc->spc_mcount) + 1);
248 if (eprio > spc->spc_maxpriority)
249 spc->spc_maxpriority = eprio;
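Lines 237-249 are the enqueue bookkeeping: clear SPCF_IDLE, bump spc_count under the run-queue lock, and keep spc_mcount (the migratable-LWP count) readable without that lock by pairing relaxed atomic stores here with relaxed loads on the reader side (line 669). A sketch of that pattern using C11 atomics in place of the kernel's atomic_store_relaxed()/atomic_load_relaxed() macros; the mcount_* names are invented:

    #include <stdatomic.h>

    static _Atomic unsigned int mcount_sketch;

    /*
     * Writer side: runs with the run-queue lock held, so the load+store pair
     * cannot race with another writer; "relaxed" only guarantees the access
     * itself is tear-free, it imposes no ordering on anything else.
     */
    static void
    mcount_inc_sketch(void)
    {
        unsigned int v = atomic_load_explicit(&mcount_sketch, memory_order_relaxed);
        atomic_store_explicit(&mcount_sketch, v + 1, memory_order_relaxed);
    }

    /*
     * Reader side: e.g. a remote CPU deciding whether this queue is worth
     * stealing from (compare line 669); it only needs a consistent snapshot,
     * not the lock.
     */
    static unsigned int
    mcount_peek_sketch(void)
    {
        return atomic_load_explicit(&mcount_sketch, memory_order_relaxed);
    }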
262 struct schedstate_percpu *spc;
265 spc = &l->l_cpu->ci_schedstate;
267 KASSERT(lwp_locked(l, spc->spc_mutex));
268 KASSERT(eprio <= spc->spc_maxpriority);
269 KASSERT(spc->spc_bitmap[eprio >> BITMAP_SHIFT] != 0);
270 KASSERT(spc->spc_count > 0);
272 if (spc->spc_migrating == l)
273 spc->spc_migrating = NULL;
275 spc->spc_count--;
277 atomic_store_relaxed(&spc->spc_mcount,
278 atomic_load_relaxed(&spc->spc_mcount) - 1);
281 q_head = sched_getrq(spc, eprio);
290 KASSERT((spc->spc_bitmap[i] & q) != 0);
291 spc->spc_bitmap[i] &= ~q;
297 if (eprio != spc->spc_maxpriority)
301 if (spc->spc_bitmap[i] != 0) {
302 q = ffs(spc->spc_bitmap[i]);
303 spc->spc_maxpriority =
310 spc->spc_maxpriority = 0;
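Lines 198-310 maintain a lookup structure beside the queues: a bitmap with one bit per priority level, set on enqueue (line 207) and cleared on dequeue once that level's queue drains (line 291), plus a cached spc_maxpriority. The sketch below stores the bit MSB-first within each 32-bit word so that ffs(), which finds the lowest set bit, lands on the highest priority in a word -- the same reason ffs() appears on line 302 while the words are scanned downward. A self-contained userland sketch of that bookkeeping; all *_SK names are hypothetical and the constants are simplified from the kernel's:

    #include <stdint.h>
    #include <strings.h>                      /* ffs() */

    #define PRI_COUNT_SK    224               /* hypothetical stand-in for PRI_COUNT */
    #define BITMAP_BITS_SK  32
    #define BITMAP_SHIFT_SK 5                 /* log2(BITMAP_BITS_SK) */
    #define BITMAP_WORDS_SK ((PRI_COUNT_SK + BITMAP_BITS_SK - 1) / BITMAP_BITS_SK)

    struct primap_sketch {                    /* hypothetical slice of schedstate_percpu */
        uint32_t spc_bitmap[BITMAP_WORDS_SK];
        int      spc_maxpriority;
    };

    /* MSB-first bit for a priority: higher priority == lower bit index,
     * so ffs() (lowest set bit) finds the highest priority in a word. */
    static uint32_t
    pri_bit_sk(int prio)
    {
        return 0x80000000u >> (prio & (BITMAP_BITS_SK - 1));
    }

    /* Enqueue side (compare lines 206-207 and 248-249): mark the level
     * non-empty and raise the cached maximum if needed. */
    static void
    pri_set_sk(struct primap_sketch *m, int prio)
    {
        m->spc_bitmap[prio >> BITMAP_SHIFT_SK] |= pri_bit_sk(prio);
        if (prio > m->spc_maxpriority)
            m->spc_maxpriority = prio;
    }

    /* Dequeue side (compare lines 290-310): clear the level once its queue
     * is empty and, if it was the maximum, walk the words downward and use
     * ffs() to find the new highest runnable priority. */
    static void
    pri_clear_sk(struct primap_sketch *m, int prio)
    {
        m->spc_bitmap[prio >> BITMAP_SHIFT_SK] &= ~pri_bit_sk(prio);
        if (prio != m->spc_maxpriority)
            return;
        for (int i = prio >> BITMAP_SHIFT_SK; i >= 0; i--) {
            if (m->spc_bitmap[i] != 0) {
                int q = ffs((int)m->spc_bitmap[i]);
                m->spc_maxpriority = (i << BITMAP_SHIFT_SK) + (BITMAP_BITS_SK - q);
                return;
            }
        }
        m->spc_maxpriority = 0;               /* nothing runnable left */
    }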
322 struct schedstate_percpu *spc;
326 spc = &ci->ci_schedstate;
328 KASSERT(mutex_owned(spc->spc_mutex));
334 if (pri <= spc->spc_curpriority || !mp_online) {
455 const struct schedstate_percpu *spc = &ci->ci_schedstate;
459 if (__predict_false(spc->spc_flags & SPCF_OFFLINE))
467 return (spc->spc_psid == l->l_psid);
476 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
478 spc->spc_nextpkg =
479 spc->spc_nextpkg->ci_sibling[CPUREL_PACKAGE1ST];
481 return spc->spc_nextpkg;
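Lines 476-481 advance spc_nextpkg one step around the ci_sibling[CPUREL_PACKAGE1ST] ring each time a starting point is requested, so successive placement decisions start from a different CPU package instead of always probing the same one. A tiny sketch of that round-robin-over-a-circular-list idea; struct pkg_sketch and its fields are invented, not the kernel's cpu_info layout:

    struct pkg_sketch {                       /* hypothetical per-package list entry */
        int id;
        struct pkg_sketch *next;              /* circular: the last entry points back to the first */
    };

    /*
     * Advance the cursor and return the new position, as the code around
     * line 478 does with spc_nextpkg: each call hands back the next package
     * in the ring, wrapping around forever.
     */
    static struct pkg_sketch *
    next_pkg_sketch(struct pkg_sketch **cursor)
    {
        *cursor = (*cursor)->next;
        return *cursor;
    }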
569 struct schedstate_percpu *spc;
581 spc = &ci->ci_schedstate;
622 if (sched_migratable(l, ci) && (eprio > spc->spc_curpriority ||
655 struct schedstate_percpu *spc, *curspc;
661 spc = &ci->ci_schedstate;
669 if (atomic_load_relaxed(&spc->spc_mcount) < (gentle ? min_catch : 1) ||
670 curspc->spc_psid != spc->spc_psid) {
676 q_head = sched_getrq(spc, spc->spc_maxpriority);
717 struct schedstate_percpu *spc, *tspc;
720 spc = &ci->ci_schedstate;
725 l = spc->spc_migrating;
745 spc->spc_migrating = NULL;
770 spc->spc_migrating = NULL;
795 struct schedstate_percpu *spc, *tspc;
798 spc = &ci->ci_schedstate;
801 spc->spc_psid == tspc->spc_psid) {
819 struct schedstate_percpu *spc, *tspc;
823 spc = &ci->ci_schedstate;
832 if (spc->spc_migrating != NULL) {
837 if ((spc->spc_flags & SPCF_OFFLINE) != 0 || spc->spc_count != 0) {
855 if ((spc->spc_flags & SPCF_1STCLASS) == 0 &&
856 spc->spc_psid == PS_NONE) {
869 if (spc->spc_nextskim > getticks()) {
872 spc->spc_nextskim = getticks() + mstohz(skim_interval);
884 spc->spc_psid != tspc->spc_psid ||
1076 struct schedstate_percpu *spc;
1085 spc = &ci->ci_schedstate;
1086 if (__predict_false(spc->spc_migrating != NULL))
1090 if (__predict_false(spc->spc_count == 0))
1094 KASSERT(spc->spc_bitmap[spc->spc_maxpriority >> BITMAP_SHIFT]);
1095 q_head = sched_getrq(spc, spc->spc_maxpriority);
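Lines 1076-1095 consume all of the bookkeeping above: a pending migration (spc_migrating) or an empty queue (spc_count == 0) sends the CPU toward the idle loop, and otherwise the next LWP is simply the head of the queue at spc_maxpriority, with no scan over the priority levels. A minimal sketch of that selection, again with hypothetical *_sketch types standing in for the kernel's:

    #include <sys/queue.h>
    #include <stddef.h>

    #define PRI_COUNT_SKETCH 224              /* hypothetical stand-in for PRI_COUNT */

    struct lwp_sketch {
        TAILQ_ENTRY(lwp_sketch) l_runq;
    };
    TAILQ_HEAD(prique_sketch, lwp_sketch);

    struct runq_sketch {
        struct prique_sketch spc_queue[PRI_COUNT_SKETCH];
        int spc_count;
        int spc_maxpriority;
    };

    /*
     * Compare lines 1090-1095: nothing runnable means "go idle" (NULL here);
     * otherwise spc_maxpriority points at a non-empty queue whose head is
     * the next LWP to run.
     */
    static struct lwp_sketch *
    nextlwp_sketch(struct runq_sketch *rq)
    {
        if (rq->spc_count == 0)
            return NULL;
        return TAILQ_FIRST(&rq->spc_queue[rq->spc_maxpriority]);
    }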
1113 const struct schedstate_percpu *spc;
1118 spc = &ci->ci_schedstate;
1119 rv = (spc->spc_count != 0);
1188 struct schedstate_percpu *spc;
1196 spc = &ci->ci_schedstate;
1206 spc->spc_count, spc->spc_maxpriority,
1207 spc->spc_migrating);
1211 q = spc->spc_bitmap[i];