Searched refs:l_mutex (Results 1 – 15 of 15) sorted by relevance
  91  kmutex_t *mp = l->l_mutex;    in sleepq_block()
  94  l->l_mutex = mp;  /* keep sleepq lock until woken up */    in sleepq_block()
 145  mutex_spin_exit(l->l_mutex);    in sleepq_unsleep()
 184  KASSERT(mutex_owned(l->l_mutex));    in lwp_unlock_to()
 186  old = l->l_mutex;    in lwp_unlock_to()
 187  atomic_store_release(&l->l_mutex, new);    in lwp_unlock_to()
 194  kmutex_t *old = atomic_load_consume(&l->l_mutex);    in lwp_lock()
 197  while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {    in lwp_lock()
 199  old = atomic_load_consume(&l->l_mutex);    in lwp_lock()
 208  mutex_spin_exit(l->l_mutex);    in lwp_unlock()
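The lwp_lock()/lwp_unlock_to() hits above show the core protocol around l_mutex: the pointer is loaded with consume ordering, the spin mutex it names is taken, and the pointer is then re-checked, because another CPU may have handed the LWP to a different lock in the meantime; the pointer is only ever changed with a release store while the old lock is held. A minimal sketch of that pattern, assuming the mutex(9) and atomic_load/store primitives seen in the hits (the sketch_* names are invented, everything else comes from the hits):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/* Sketch: acquire whichever spin mutex currently covers the LWP. */
static void
sketch_lwp_lock(struct lwp *l)
{
        kmutex_t *old = atomic_load_consume(&l->l_mutex);

        for (;;) {
                mutex_spin_enter(old);
                /* Still the LWP's lock?  Then we own the LWP's state. */
                if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
                        break;
                /* The LWP was passed to another lock; chase the new one. */
                mutex_spin_exit(old);
                old = atomic_load_consume(&l->l_mutex);
        }
}

/* Sketch: hand the LWP over to a new lock and release the old one. */
static void
sketch_lwp_unlock_to(struct lwp *l, kmutex_t *new)
{
        kmutex_t *old;

        KASSERT(mutex_owned(l->l_mutex));
        old = l->l_mutex;
        atomic_store_release(&l->l_mutex, new);
        mutex_spin_exit(old);
}

lwp_unlock() itself then amounts to mutex_spin_exit(l->l_mutex) while the lock is held, as in hit 208 above.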
  70  KASSERT(mutex_owned(l->l_mutex));    in lwp_unsleep()
 335  l->l_mutex = &unruntime_lock;    in lwproc_makelwp()
 478  newlwp->l_mutex = l->l_mutex;    in rump_lwproc_switch()
 495  l->l_mutex = &unruntime_lock;    in rump_lwproc_switch()
 381  l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex;    in rump_schedule_cpu_interlock()
 405  KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex);    in rump_unschedule()
 407  l->l_mutex = &unruntime_lock;    in rump_unschedule()
 428  lwp0.l_mutex = &unruntime_lock;    in rump_unschedule()
 220  LOCKDEBUG_BARRIER(l->l_mutex, 1);    in turnstile_lendpri()
 253  dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex);    in turnstile_lendpri()
 296  LOCKDEBUG_BARRIER(owner->l_mutex, 1);    in turnstile_lendpri()
 299  LOCKDEBUG_BARRIER(l->l_mutex, 1);    in turnstile_lendpri()
 300  if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) {    in turnstile_lendpri()
 304  LOCKDEBUG_BARRIER(cur->l_mutex, 1);    in turnstile_lendpri()
 323  dolock = (atomic_load_relaxed(&l->l_mutex) ==    in turnstile_unlendpri()
 431  KASSERT(lock == l->l_mutex);    in turnstile_block()
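The "dolock" hits in turnstile_lendpri()/turnstile_unlendpri() show a consequence of the shared-lock scheme: two LWPs can be covered by the same spin mutex, so before locking a second LWP the code compares the two l_mutex pointers and takes the second lock only when it differs from the one already held. A hedged sketch of that idiom (sketch_* is invented; lwp_trylock() and lwp_unlock() are the functions appearing in the other hits, and the headers are the same ones listed in the first sketch):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/*
 * Sketch: lock a second LWP only if it is not already covered by the
 * lock we hold, mirroring the "dolock" logic in turnstile_lendpri().
 */
static bool
sketch_lock_owner(struct lwp *cur, struct lwp *owner)
{
        bool dolock;

        KASSERT(mutex_owned(cur->l_mutex));
        dolock = cur->l_mutex != atomic_load_relaxed(&owner->l_mutex);
        if (dolock && !lwp_trylock(owner))
                return false;           /* caller backs out and retries */
        /* ... operate on the owner's scheduler state here ... */
        if (dolock)
                lwp_unlock(owner);
        return true;
}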
 400  l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock;    in lwp_ctor()
 837  KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock);    in lwp_create()
1045  lock = prev->l_mutex;    in lwp_startup()
1545  kmutex_t *cur = l->l_mutex;    in lwp_locked()
1556  kmutex_t *oldmtx = l->l_mutex;    in lwp_setlock()
1560  atomic_store_release(&l->l_mutex, mtx);    in lwp_setlock()
1575  old = l->l_mutex;    in lwp_unlock_to()
1576  atomic_store_release(&l->l_mutex, mtx);    in lwp_unlock_to()
1586  if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))    in lwp_trylock()
1588  if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))    in lwp_trylock()
[all …]
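The lwp_setlock()/lwp_trylock() hits show the remaining pieces of the protocol: the pointer is redirected with a release store by a thread that already holds the current lock, and a try-lock must re-check the pointer after mutex_tryenter() succeeds, just as lwp_lock() does. A sketch of the try-lock side only (sketch_* is invented and the bool return is an assumption, since the hits do not show the real return type; headers as in the first sketch):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/* Sketch: try to take the LWP's current lock without spinning. */
static bool
sketch_lwp_trylock(struct lwp *l)
{
        kmutex_t *old;

        for (;;) {
                if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex)))
                        return false;           /* lock is busy */
                if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old))
                        return true;            /* locked, pointer stable */
                mutex_spin_exit(old);           /* pointer moved; retry */
        }
}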
 493  KASSERT(l->l_mutex == mp);    in cv_wakeup_one()
 540  KASSERT(l->l_mutex == mp);    in cv_wakeup_all()
94 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock); in idle_loop()
 731  if (l->l_mutex == spc->spc_mutex) {    in mi_switch()
 746  LOCKDEBUG_BARRIER(l->l_mutex, 1);    in mi_switch()
 834  lock = prevlwp->l_mutex;    in mi_switch()
 895  KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex);    in setrunnable()
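The mi_switch()/setrunnable() hits, like the sched_tick() ones below, also use l_mutex as a state indicator: an LWP sitting on a run queue is covered by its CPU's spc_mutex, so comparing the pointer against spc_mutex tells whether the LWP is currently enqueued. A small sketch of that check (sketch_* is invented; the field names come from the hits, and struct schedstate_percpu is assumed to be the type behind ci_schedstate):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/cpu.h>
#include <sys/sched.h>
#include <sys/lwp.h>

/* Sketch: is this LWP covered by its CPU's run-queue lock right now? */
static bool
sketch_lwp_on_runqueue(struct lwp *l)
{
        struct schedstate_percpu *spc = &l->l_cpu->ci_schedstate;

        KASSERT(mutex_owned(l->l_mutex));
        return l->l_mutex == spc->spc_mutex;
}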
 464  KASSERT(l->l_mutex == mp);    in sleepq_wake()
 487  kmutex_t *mp = l->l_mutex;    in sleepq_unsleep()
124 KASSERT(l->l_mutex != spc->spc_mutex); in sched_tick()
306 KASSERT(l->l_mutex != spc->spc_mutex); in sched_tick()
491 mp = t->l_mutex; in lwp_unpark()
915 if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
151 lwp0.l_mutex = spc->spc_lwplock; in sched_cpuattach()
91 kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */ member
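The declaration itself (from struct lwp) carries the locking key: fields annotated "l:" are protected by whatever mutex l_mutex currently points to, which is why nearly every hit above either holds that mutex or asserts it with mutex_owned(). A minimal caller-side sketch of the resulting idiom (sketch_* is invented; lwp_lock()/lwp_unlock() are the wrappers seen in the hits):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lwp.h>
#include <sys/mutex.h>

/* Sketch: the usual way callers reach "l:"-keyed LWP state. */
static void
sketch_poke_lwp(struct lwp *l)
{
        lwp_lock(l);                    /* acquire whatever l_mutex names */
        KASSERT(mutex_owned(l->l_mutex));
        /* ... read or modify "l:"-keyed scheduler state here ... */
        lwp_unlock(l);                  /* mutex_spin_exit(l->l_mutex) */
}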