Lines Matching +full:"no-idle-on-init"

79 check_cpu_switched(int c, cpuset_t *csp, uint64_t *swt, bool init) in check_cpu_switched() argument
88 if (pc->pc_curthread == pc->pc_idlethread) { in check_cpu_switched()
95	 * pc_curthread with a non-idle thread pointer is visible before	in check_cpu_switched()
100 sw = pc->pc_switchtime; in check_cpu_switched()
101 if (init) in check_cpu_switched()
110	 * sync_core) on the current CPU as well. There is no guarantee that
113	 * might not be provided by the syscall return. E.g. on amd64 we
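
The check_cpu_switched() fragments above carry the cheap part of the algorithm: a CPU whose pc_curthread is the idle thread, or whose pc_switchtime has changed since the first pass, has already gone through a context switch (and therefore a full barrier), so it can be dropped from the set that still needs an IPI. A minimal user-level sketch of that pattern follows; struct cpu_snap and cpu_quiesced() are hypothetical stand-ins for the struct pcpu fields used above, not kernel API.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical stand-in for the struct pcpu fields referenced above. */
struct cpu_snap {
	bool	 idle;		/* pc_curthread == pc_idlethread */
	uint64_t switchtime;	/* pc_switchtime analogue */
};

/*
 * First pass (init == true): remember the switch time of every CPU that is
 * running a non-idle thread and report it as not yet quiesced.  On later
 * passes a CPU counts as quiesced once it is idle or its switch time has
 * moved, because a context switch implies a full memory barrier on that CPU.
 */
bool
cpu_quiesced(const struct cpu_snap *cs, uint64_t *swt, bool init)
{
	if (cs->idle)
		return (true);
	if (init) {
		*swt = cs->switchtime;
		return (false);
	}
	return (cs->switchtime != *swt);
}
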
130 td->td_retval[0] = MEMBARRIER_SUPPORTED_CMDS; in kern_membarrier()
134 p = td->td_proc; in kern_membarrier()
158 if ((td->td_proc->p_flag2 & P2_MEMBAR_GLOBE) == 0) { in kern_membarrier()
163 td1 = cpuid_to_pcpu[c]->pc_curthread; in kern_membarrier()
164 p1 = td1->td_proc; in kern_membarrier()
166 (p1->p_flag2 & P2_MEMBAR_GLOBE) != 0) in kern_membarrier()
174 if ((p->p_flag2 & P2_MEMBAR_GLOBE) == 0) { in kern_membarrier()
176 p->p_flag2 |= P2_MEMBAR_GLOBE; in kern_membarrier()
182 if ((td->td_proc->p_flag2 & P2_MEMBAR_PRIVE) == 0) { in kern_membarrier()
185 pmap_active_cpus(vmspace_pmap(p->p_vmspace), &cs); in kern_membarrier()
191 if ((p->p_flag2 & P2_MEMBAR_PRIVE) == 0) { in kern_membarrier()
193 p->p_flag2 |= P2_MEMBAR_PRIVE; in kern_membarrier()
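
The P2_MEMBAR_PRIVE test above is the registration gate for the private expedited command: a process must issue MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED once before MEMBARRIER_CMD_PRIVATE_EXPEDITED is accepted (Linux rejects the unregistered case with EPERM; the fragment does not show which errno this implementation returns). A hedged user-level sketch, assuming a three-argument membarrier() wrapper matching the sys_membarrier() signature below and the <sys/membarrier.h> command macros:

#include <sys/membarrier.h>	/* assumed header; use syscall(2) where no wrapper exists */

#include <err.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Expected to fail: the process has not registered yet. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) == -1)
		printf("unregistered call rejected: %s\n", strerror(errno));

	/* Register once per process; kern_membarrier() sets P2_MEMBAR_PRIVE. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0) == -1)
		err(1, "register private expedited");

	/* Now all running threads of this process observe a full barrier. */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0, 0) == -1)
		err(1, "private expedited");
	return (0);
}
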
199 if ((td->td_proc->p_flag2 & P2_MEMBAR_PRIVE_SYNCORE) == 0) { in kern_membarrier()
205 * cpu_sync_core() on CPUs that were missed in kern_membarrier()
208 * on amd64 because threads always use slow in kern_membarrier()
212 pmap_active_cpus(vmspace_pmap(p->p_vmspace), &cs); in kern_membarrier()
220 if ((p->p_flag2 & P2_MEMBAR_PRIVE_SYNCORE) == 0) { in kern_membarrier()
222 p->p_flag2 |= P2_MEMBAR_PRIVE_SYNCORE; in kern_membarrier()
238 return (kern_membarrier(td, uap->cmd, uap->flags, uap->cpu_id)); in sys_membarrier()
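
Taken together, kern_membarrier() implements the Linux-compatible command set that sys_membarrier() exposes: QUERY, GLOBAL, the expedited global and private barriers, and the private sync-core variant, with each expedited flavor gated by its REGISTER command. A hedged end-to-end sketch of the sync-core flow a code-patching (JIT-style) caller would use, under the same wrapper and header assumptions as above:

#include <sys/membarrier.h>	/* assumed header providing MEMBARRIER_CMD_* */

#include <err.h>
#include <stdio.h>

int
main(void)
{
	int cmds;

	/* CMD_QUERY returns the MEMBARRIER_SUPPORTED_CMDS bitmask seen above. */
	cmds = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
	if (cmds == -1)
		err(1, "membarrier query");
	if ((cmds & MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE) == 0)
		errx(1, "sync-core commands not supported");

	/* Register first; this is what sets P2_MEMBAR_PRIVE_SYNCORE. */
	if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0) == -1)
		err(1, "register sync-core");

	/* ... patch or generate code here ... */

	/*
	 * Sibling threads serialize their instruction streams (cpu_sync_core()
	 * in the fragments above) before executing further user-space code, so
	 * stale copies of the patched instructions cannot keep running.
	 */
	if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0, 0) == -1)
		err(1, "sync-core barrier");

	printf("patched code is safe to run on all threads\n");
	return (0);
}
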