
Searched refs:l_mutex (Results 1 – 15 of 15) sorted by relevance

/netbsd-src/sys/rump/librump/rumpkern/
sleepq.c
91 kmutex_t *mp = l->l_mutex; in sleepq_block()
94 l->l_mutex = mp; /* keep sleepq lock until woken up */ in sleepq_block()
145 mutex_spin_exit(l->l_mutex); in sleepq_unsleep()
184 KASSERT(mutex_owned(l->l_mutex)); in lwp_unlock_to()
186 old = l->l_mutex; in lwp_unlock_to()
187 atomic_store_release(&l->l_mutex, new); in lwp_unlock_to()
194 kmutex_t *old = atomic_load_consume(&l->l_mutex); in lwp_lock()
197 while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) { in lwp_lock()
199 old = atomic_load_consume(&l->l_mutex); in lwp_lock()
208 mutex_spin_exit(l->l_mutex); in lwp_unlock()
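
The sleepq.c hits above show the pointer-chase idiom rump uses to take an LWP's scheduler lock: another CPU may swap l->l_mutex while this one is spinning, so the pointer is re-read after the mutex is acquired and the loop retries if it moved. A minimal sketch of that pattern, assuming the NetBSD kernel mutex and atomic_load_*/atomic_store_* interfaces; the function name is illustrative, not the kernel's:

    #include <sys/param.h>
    #include <sys/atomic.h>
    #include <sys/mutex.h>
    #include <sys/lwp.h>

    /* Illustrative only: lock whatever mutex l->l_mutex currently points at. */
    static void
    lwp_lock_sketch(struct lwp *l)
    {
    	kmutex_t *old = atomic_load_consume(&l->l_mutex);

    	mutex_spin_enter(old);
    	while (__predict_false(atomic_load_relaxed(&l->l_mutex) != old)) {
    		/* The lock pointer changed before we won the race: chase it. */
    		mutex_spin_exit(old);
    		old = atomic_load_consume(&l->l_mutex);
    		mutex_spin_enter(old);
    	}
    	/* On return the caller holds the LWP's current scheduler lock. */
    }
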
lwproc.c
70 KASSERT(mutex_owned(l->l_mutex)); in lwp_unsleep()
335 l->l_mutex = &unruntime_lock; in lwproc_makelwp()
478 newlwp->l_mutex = l->l_mutex; in rump_lwproc_switch()
495 l->l_mutex = &unruntime_lock; in rump_lwproc_switch()
scheduler.c
381 l->l_mutex = rcpu->rcpu_ci->ci_schedstate.spc_mutex; in rump_schedule_cpu_interlock()
405 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_mutex); in rump_unschedule()
407 l->l_mutex = &unruntime_lock; in rump_unschedule()
428 lwp0.l_mutex = &unruntime_lock; in rump_unschedule()
/netbsd-src/sys/kern/
kern_turnstile.c
220 LOCKDEBUG_BARRIER(l->l_mutex, 1); in turnstile_lendpri()
253 dolock = l->l_mutex != atomic_load_relaxed(&owner->l_mutex); in turnstile_lendpri()
296 LOCKDEBUG_BARRIER(owner->l_mutex, 1); in turnstile_lendpri()
299 LOCKDEBUG_BARRIER(l->l_mutex, 1); in turnstile_lendpri()
300 if (cur->l_mutex != atomic_load_relaxed(&l->l_mutex)) { in turnstile_lendpri()
304 LOCKDEBUG_BARRIER(cur->l_mutex, 1); in turnstile_lendpri()
323 dolock = (atomic_load_relaxed(&l->l_mutex) == in turnstile_unlendpri()
431 KASSERT(lock == l->l_mutex); in turnstile_block()
kern_lwp.c
400 l->l_mutex = l->l_cpu->ci_schedstate.spc_lwplock; in lwp_ctor()
837 KASSERT(l2->l_mutex == l2->l_cpu->ci_schedstate.spc_lwplock); in lwp_create()
1045 lock = prev->l_mutex; in lwp_startup()
1545 kmutex_t *cur = l->l_mutex; in lwp_locked()
1556 kmutex_t *oldmtx = l->l_mutex; in lwp_setlock()
1560 atomic_store_release(&l->l_mutex, mtx); in lwp_setlock()
1575 old = l->l_mutex; in lwp_unlock_to()
1576 atomic_store_release(&l->l_mutex, mtx); in lwp_unlock_to()
1586 if (!mutex_tryenter(old = atomic_load_consume(&l->l_mutex))) in lwp_trylock()
1588 if (__predict_true(atomic_load_relaxed(&l->l_mutex) == old)) in lwp_trylock()
[all …]
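
The kern_lwp.c hits at 1556 and 1575 show the other half of the protocol: changing which mutex guards an LWP. With the current lock held, the new pointer is published with release semantics so the pointer-chasing readers above observe a consistent mutex, and only then is the old lock dropped. A short sketch of that hand-off, again with illustrative naming:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/atomic.h>
    #include <sys/mutex.h>
    #include <sys/lwp.h>

    /*
     * Illustrative only: move l onto a new scheduler lock and hand the
     * old one back to the caller to release, as lwp_setlock() does.
     */
    static kmutex_t *
    lwp_setlock_sketch(struct lwp *l, kmutex_t *mtx)
    {
    	kmutex_t *oldmtx = l->l_mutex;

    	KASSERT(mutex_owned(oldmtx));
    	atomic_store_release(&l->l_mutex, mtx);	/* publish the new lock */
    	return oldmtx;				/* still held; caller unlocks */
    }
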
kern_condvar.c
493 KASSERT(l->l_mutex == mp); in cv_wakeup_one()
540 KASSERT(l->l_mutex == mp); in cv_wakeup_all()
kern_idle.c
94 KASSERT(l->l_mutex == l->l_cpu->ci_schedstate.spc_lwplock); in idle_loop()
kern_synch.c
731 if (l->l_mutex == spc->spc_mutex) { in mi_switch()
746 LOCKDEBUG_BARRIER(l->l_mutex, 1); in mi_switch()
834 lock = prevlwp->l_mutex; in mi_switch()
895 KASSERT(l->l_mutex != l->l_cpu->ci_schedstate.spc_mutex); in setrunnable()
kern_sleepq.c
464 KASSERT(l->l_mutex == mp); in sleepq_wake()
487 kmutex_t *mp = l->l_mutex; in sleepq_unsleep()
sched_4bsd.c
124 KASSERT(l->l_mutex != spc->spc_mutex); in sched_tick()
sched_m2.c
306 KASSERT(l->l_mutex != spc->spc_mutex); in sched_tick()
sys_lwp.c
491 mp = t->l_mutex; in lwp_unpark()
sys_select.c
915 if (oflag == SEL_BLOCKING && l->l_mutex == lock) {
kern_runq.c
151 lwp0.l_mutex = spc->spc_lwplock; in sched_cpuattach()
/netbsd-src/sys/sys/
lwp.h
91 kmutex_t * volatile l_mutex; /* l: ptr to mutex on sched state */ member
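
The "l:" key in the lwp.h comment means an LWP's scheduling state is protected by whatever mutex l_mutex currently points at rather than by one fixed lock; lwp_lock() and lwp_unlock() are the kernel's accessors for it. A hedged usage sketch, where the caller and the state being touched are hypothetical:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/mutex.h>
    #include <sys/lwp.h>

    /*
     * Hypothetical caller: take the LWP's current scheduler lock around
     * any access to the fields marked "l:" in struct lwp.
     */
    static void
    touch_sched_state(struct lwp *l)
    {
    	lwp_lock(l);			/* acquires *l->l_mutex, chasing changes */
    	KASSERT(mutex_owned(l->l_mutex));
    	/* ... read or update l's "l:"-protected scheduling state ... */
    	lwp_unlock(l);			/* drops the mutex l_mutex points at */
    }
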