Lines Matching defs:spc
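
Every match below dereferences spc, a cached pointer to struct schedstate_percpu, the per-CPU scheduler state. Judging by the identifiers (clockintr_bind, spc_whichqs, SPCF_SHOULDHALT), the file being searched appears to be OpenBSD's sys/kern/kern_sched.c. The short C sketches interleaved below are illustrative userland reconstructions of the patterns the matches hint at; they are not the kernel source, and any helper names they introduce are hypothetical.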
83 struct schedstate_percpu *spc = &ci->ci_schedstate;
87 TAILQ_INIT(&spc->spc_qs[i]);
89 spc->spc_idleproc = NULL;
91 clockintr_bind(&spc->spc_itimer, ci, itimer_update, NULL);
92 clockintr_bind(&spc->spc_profclock, ci, profclock, NULL);
93 clockintr_bind(&spc->spc_roundrobin, ci, roundrobin, NULL);
94 clockintr_bind(&spc->spc_statclock, ci, statclock, NULL);
98 LIST_INIT(&spc->spc_deadproc);
99 SIMPLEQ_INIT(&spc->spc_deferred);
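
Lines 83-99 are the per-CPU initialization: an array of run queues (one TAILQ per priority band), a cleared idle-proc pointer, four clock interrupts bound to this CPU, and two auxiliary lists. A minimal userland sketch of the run-queue layout these matches imply, assuming SCHED_NQS queues and a one-bit-per-queue bitmap (the spc_whichqs word used throughout the rest of the listing):

#include <sys/queue.h>
#include <stdint.h>

#define SCHED_NQS	32	/* assumed: 32 queues over 128 priorities */

struct proc {
	TAILQ_ENTRY(proc) p_runq;	/* run-queue linkage (line 291) */
	uint8_t p_runpri;		/* priority recorded at enqueue */
};

struct schedstate_percpu {
	TAILQ_HEAD(prochead, proc) spc_qs[SCHED_NQS];
	uint32_t spc_whichqs;		/* bit i set <=> spc_qs[i] non-empty */
	unsigned int spc_nrun;		/* runnable procs on this CPU */
	uint8_t spc_curpriority;	/* priority of the running proc */
};

static void
sched_init_cpu_sketch(struct schedstate_percpu *spc)
{
	int i;

	for (i = 0; i < SCHED_NQS; i++)
		TAILQ_INIT(&spc->spc_qs[i]);
	spc->spc_whichqs = 0;
	spc->spc_nrun = 0;
	/*
	 * The real function also binds the itimer, profclock,
	 * roundrobin and statclock interrupts to this CPU and
	 * initializes the deadproc and deferred lists.
	 */
}

The bitmap-plus-queue-array shape is what makes the later ffs(spc->spc_whichqs) - 1 selection constant-time.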
118 struct schedstate_percpu *spc = &ci->ci_schedstate;
123 &spc->spc_idleproc))
127 snprintf(spc->spc_idleproc->p_p->ps_comm,
128 sizeof(spc->spc_idleproc->p_p->ps_comm),
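
Lines 118-128 create the per-CPU idle thread (the &spc->spc_idleproc fragment is the result pointer of a kthread_create call) and then write a distinct name into its command string. A sketch of the naming step only; the buffer size and the cpuid parameter are assumptions:

#include <stdio.h>

#define PS_COMM_LEN	24	/* assumed size of the ps_comm buffer */

/* give each CPU's idle thread a recognizable name, e.g. "idle0" */
static void
name_idle_proc(char comm[PS_COMM_LEN], unsigned int cpuid)
{
	snprintf(comm, PS_COMM_LEN, "idle%u", cpuid);
}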
137 struct schedstate_percpu *spc;
143 spc = &ci->ci_schedstate;
159 KASSERT(curproc == spc->spc_idleproc);
170 while ((dead = LIST_FIRST(&spc->spc_deadproc))) {
182 while (spc->spc_whichqs == 0) {
184 if (spc->spc_schedflags & SPCF_SHOULDHALT &&
185 (spc->spc_schedflags & SPCF_HALTED) == 0) {
188 atomic_setbits_int(&spc->spc_schedflags,
189 spc->spc_whichqs ? 0 : SPCF_HALTED);
191 wakeup(spc);
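
Lines 137-191 are the idle loop. Line 170 onward reaps procs parked on spc_deadproc (see the sketch after line 217), and lines 182-191 implement the idle side of a CPU-halt handshake: while nothing is runnable, if SPCF_SHOULDHALT was requested and SPCF_HALTED not yet acknowledged, acknowledge it (unless work appeared in the meantime) and wake whoever is sleeping on the spc pointer. A C11-atomics sketch of that handshake, with illustrative flag values and a stubbed wakeup():

#include <stdatomic.h>
#include <stdint.h>

#define SPCF_SHOULDHALT	0x01	/* illustrative values, not OpenBSD's */
#define SPCF_HALTED	0x02

struct spc_halt {
	_Atomic uint32_t spc_schedflags;
	_Atomic uint32_t spc_whichqs;
};

static void wakeup(void *ident) { (void)ident; /* kernel wakes sleepers */ }

static void
idle_halt_check(struct spc_halt *spc)
{
	uint32_t flags = atomic_load(&spc->spc_schedflags);

	if ((flags & SPCF_SHOULDHALT) && (flags & SPCF_HALTED) == 0) {
		/*
		 * Mirror line 189's "spc_whichqs ? 0 : SPCF_HALTED":
		 * only acknowledge the halt if no proc became runnable
		 * between the check and the flag update.
		 */
		atomic_fetch_or(&spc->spc_schedflags,
		    atomic_load(&spc->spc_whichqs) ? 0 : SPCF_HALTED);
		wakeup(spc);
	}
}

The waiter side appears at lines 689-696 and is sketched at the end of this listing.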
215 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
217 LIST_INSERT_HEAD(&spc->spc_deadproc, p, p_hash);
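
Lines 215-217 queue an exiting proc on the per-CPU dead list instead of freeing it immediately: a thread cannot tear down the stack it is still running on, so cleanup is deferred to the idle thread's reap loop (line 170). A userland sketch of that defer-and-reap pattern, with free() standing in for the kernel's stack and proc teardown:

#include <sys/queue.h>
#include <stdlib.h>

struct proc {
	LIST_ENTRY(proc) p_hash;	/* linkage reused for the dead list */
};

LIST_HEAD(deadhead, proc);

/* called by the dying proc, before its final context switch */
static void
sched_exit_sketch(struct deadhead *spc_deadproc, struct proc *p)
{
	LIST_INSERT_HEAD(spc_deadproc, p, p_hash);
}

/* called from the idle loop, on a stack that is safe to run on */
static void
reap_dead_sketch(struct deadhead *spc_deadproc)
{
	struct proc *dead;

	while ((dead = LIST_FIRST(spc_deadproc)) != NULL) {
		LIST_REMOVE(dead, p_hash);
		free(dead);
	}
}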
228 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
237 if (ISSET(spc->spc_schedflags, SPCF_ITIMER)) {
238 atomic_clearbits_int(&spc->spc_schedflags, SPCF_ITIMER);
239 clockintr_cancel(&spc->spc_itimer);
241 if (ISSET(spc->spc_schedflags, SPCF_PROFCLOCK)) {
242 atomic_clearbits_int(&spc->spc_schedflags, SPCF_PROFCLOCK);
243 clockintr_cancel(&spc->spc_profclock);
246 atomic_clearbits_int(&spc->spc_schedflags, SPCF_SWITCHCLEAR);
249 idle = spc->spc_idleproc;
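
Lines 228-249 run on the way into a context switch: each SPCF_* bit records that the corresponding per-CPU clock interrupt was armed for the outgoing proc, so the switch path cancels exactly what is pending, then clears the whole group in one SPCF_SWITCHCLEAR store. A sketch of that flag-guarded cancel, with illustrative bit values and a hypothetical clockintr stand-in:

#include <stdatomic.h>
#include <stdint.h>

#define SPCF_ITIMER	0x04		/* illustrative bit values */
#define SPCF_PROFCLOCK	0x08
#define SPCF_SWITCHCLEAR (SPCF_ITIMER | SPCF_PROFCLOCK)

struct clockintr { int armed; };	/* hypothetical stand-in */

static void clockintr_cancel_sketch(struct clockintr *cl) { cl->armed = 0; }

static void
switch_clear_clocks(_Atomic uint32_t *schedflags,
    struct clockintr *itimer, struct clockintr *profclock)
{
	uint32_t flags = atomic_load(schedflags);

	if (flags & SPCF_ITIMER)
		clockintr_cancel_sketch(itimer);	/* line 239 */
	if (flags & SPCF_PROFCLOCK)
		clockintr_cancel_sketch(profclock);	/* line 243 */
	/* line 246: one atomic clear for the whole group */
	atomic_fetch_and(schedflags, (uint32_t)~SPCF_SWITCHCLEAR);
}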
271 struct schedstate_percpu *spc;
286 spc = &p->p_cpu->ci_schedstate;
287 spc->spc_nrun++;
291 TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);
292 spc->spc_whichqs |= (1U << queue);
297 else if (prio < spc->spc_curpriority)
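
Lines 271-297 are the enqueue path: bump the runnable count, append the proc to the queue for its priority band, set that queue's bit, and, at line 297, kick the CPU if the newcomer outranks whatever is running (lower numeric priority is better). Building on the declarations from the first sketch; the prio >> 2 band mapping and the resched stub are assumptions:

static void need_resched_sketch(void) { /* would IPI or flag the CPU */ }

static void
setrunqueue_sketch(struct schedstate_percpu *spc, struct proc *p, uint8_t prio)
{
	int queue = prio >> 2;		/* assumed band mapping */

	spc->spc_nrun++;					/* line 287 */
	p->p_runpri = prio;
	TAILQ_INSERT_TAIL(&spc->spc_qs[queue], p, p_runq);	/* line 291 */
	spc->spc_whichqs |= (1U << queue);			/* line 292 */

	if (prio < spc->spc_curpriority)			/* line 297 */
		need_resched_sketch();
}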
304 struct schedstate_percpu *spc;
308 spc = &p->p_cpu->ci_schedstate;
309 spc->spc_nrun--;
313 TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);
314 if (TAILQ_EMPTY(&spc->spc_qs[queue])) {
315 spc->spc_whichqs &= ~(1U << queue);
316 if (spc->spc_whichqs == 0)
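
Lines 304-316 are the mirror-image removal: pull the proc off its queue and clear the queue's bit once the queue drains, so spc_whichqs == 0 (line 316) again means "nothing runnable on this CPU". Same declarations as above:

static void
remrunqueue_sketch(struct schedstate_percpu *spc, struct proc *p)
{
	int queue = p->p_runpri >> 2;

	spc->spc_nrun--;					/* line 309 */
	TAILQ_REMOVE(&spc->spc_qs[queue], p, p_runq);		/* line 313 */
	if (TAILQ_EMPTY(&spc->spc_qs[queue]))
		spc->spc_whichqs &= ~(1U << queue);		/* line 315 */
}

The == 0 transition at line 316 presumably lets the kernel drop this CPU from whatever set tracks CPUs with queued work; the listing cuts off before the body.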
324 struct schedstate_percpu *spc = &curcpu()->ci_schedstate;
331 if (spc->spc_schedflags & SPCF_SHOULDHALT) {
332 if (spc->spc_whichqs) {
334 while ((p = TAILQ_FIRST(&spc->spc_qs[queue]))) {
344 p = spc->spc_idleproc;
355 if (spc->spc_whichqs) {
356 queue = ffs(spc->spc_whichqs) - 1;
357 p = TAILQ_FIRST(&spc->spc_qs[queue]);
363 p = spc->spc_idleproc;
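
Lines 324-363 pick the next proc to run. The normal path (lines 355-363) is the payoff of the bitmap: ffs() returns the lowest set bit, i.e. the highest-priority non-empty queue, and the head of that queue runs next; with no bits set, the CPU falls back to its idle proc. The halt path (lines 331-344) instead drains every queue so those procs can be requeued elsewhere. A sketch of the normal path, again using the earlier declarations:

#include <strings.h>	/* ffs() */

static struct proc *
chooseproc_sketch(struct schedstate_percpu *spc, struct proc *idleproc)
{
	int queue;

	if (spc->spc_whichqs) {
		queue = ffs(spc->spc_whichqs) - 1;	/* ffs() is 1-based */
		/* the real code also dequeues the winner at this point */
		return TAILQ_FIRST(&spc->spc_qs[queue]);
	}
	return idleproc;				/* line 363 */
}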
499 struct schedstate_percpu *spc;
519 spc = &ci->ci_schedstate;
521 queue = ffs(spc->spc_whichqs) - 1;
522 TAILQ_FOREACH(p, &spc->spc_qs[queue], p_runq) {
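
Lines 499-522 scan another CPU's best queue looking for a proc to steal. The TAILQ_FOREACH at line 522 walks the candidates in order; the real code scores them, and must respect CPU affinity, before taking one. A sketch with a hypothetical can_migrate() predicate, reusing the earlier declarations:

static struct proc *
steal_proc_sketch(struct schedstate_percpu *victim,
    int (*can_migrate)(struct proc *))
{
	struct proc *p;
	int queue;

	if (victim->spc_whichqs == 0)
		return NULL;			/* nothing to steal */
	queue = ffs(victim->spc_whichqs) - 1;	/* line 521 */
	TAILQ_FOREACH(p, &victim->spc_qs[queue], p_runq) {
		if (can_migrate(p))
			return p;	/* caller would remrunqueue() it */
	}
	return NULL;
}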
584 struct schedstate_percpu *spc;
587 spc = &ci->ci_schedstate;
595 cost += (p->p_usrpri - spc->spc_curpriority) *
600 cost += spc->spc_nrun * sched_cost_runnable;
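
Lines 584-600 price how expensive it would be to run proc p on a given CPU: line 595 charges for the gap between p's user priority and what the CPU is currently running (a positive gap means p would sit behind better work), and line 600 charges per proc already queued. The weights are scheduler tunables; the values below exist only so the worked example computes:

static const int sched_cost_priority = 4;	/* illustrative weights */
static const int sched_cost_runnable = 16;

static int
cpu_cost_sketch(int p_usrpri, int spc_curpriority, unsigned int spc_nrun)
{
	int cost = 0;

	cost += (p_usrpri - spc_curpriority) * sched_cost_priority; /* 595 */
	cost += (int)spc_nrun * sched_cost_runnable;		    /* 600 */
	return cost;
}

/*
 * Worked example: p_usrpri 60 on a CPU running at priority 50 with two
 * queued procs costs (60 - 50) * 4 + 2 * 16 = 72; the cheapest CPU wins.
 */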
657 struct schedstate_percpu *spc = &ci->ci_schedstate;
661 atomic_clearbits_int(&spc->spc_schedflags,
681 struct schedstate_percpu *spc = &ci->ci_schedstate;
686 atomic_setbits_int(&spc->spc_schedflags, SPCF_SHOULDHALT);
689 struct schedstate_percpu *spc = &ci->ci_schedstate;
693 while ((spc->spc_schedflags & SPCF_HALTED) == 0) {
694 sleep_setup(spc, PZERO, "schedstate");
696 (spc->spc_schedflags & SPCF_HALTED) == 0);
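
Lines 681-696 are the waiter side of the halt handshake sketched earlier: set SPCF_SHOULDHALT for the target CPU, then sleep on the spc pointer (the "schedstate" wait channel at line 694) until the idle loop acknowledges with SPCF_HALTED; lines 657-661 are presumably the restart path clearing those bits again. A pthread sketch in which a mutex and condition variable stand in for the kernel's sleep_setup()/sleep_finish() and wakeup():

#include <pthread.h>
#include <stdint.h>

#define SPCF_SHOULDHALT	0x01	/* same illustrative values as above */
#define SPCF_HALTED	0x02

struct spc_wait {
	pthread_mutex_t mtx;
	pthread_cond_t cv;
	uint32_t schedflags;
};

/* sched_stop_secondary_cpus(), per CPU: ask, then wait for the ack */
static void
stop_cpu_sketch(struct spc_wait *spc)
{
	pthread_mutex_lock(&spc->mtx);
	spc->schedflags |= SPCF_SHOULDHALT;		/* line 686 */
	while ((spc->schedflags & SPCF_HALTED) == 0)	/* lines 693-696 */
		pthread_cond_wait(&spc->cv, &spc->mtx);
	pthread_mutex_unlock(&spc->mtx);
}

/* the idle loop's wakeup(spc) from line 191 maps to: */
static void
ack_halt_sketch(struct spc_wait *spc)
{
	pthread_mutex_lock(&spc->mtx);
	spc->schedflags |= SPCF_HALTED;
	pthread_cond_signal(&spc->cv);
	pthread_mutex_unlock(&spc->mtx);
}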