Lines matching full:m (one line per occurrence of the whole token `m'; continuation lines that do not contain the token are omitted, so some statements and comments appear truncated)

93 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
95 #define mtx_destroyed(m) ((m)->mtx_lock == MTX_DESTROYED)
240 struct mtx *m;
242 m = (struct mtx *)lock;
243 mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
244 mtx_unlock(m);
251 struct mtx *m;
253 m = (struct mtx *)lock;
254 mtx_assert(m, MA_OWNED | MA_NOTRECURSED);
255 mtx_unlock_spin(m);
263 const struct mtx *m;
266 m = (const struct mtx *)lock;
267 x = m->mtx_lock;
280 struct mtx *m;
283 m = mtxlock2mtx(c);
288 curthread, m->lock_object.lo_name, file, line));
289 KASSERT(m->mtx_lock != MTX_DESTROYED,
291 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
292 ("mtx_lock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
294 WITNESS_CHECKORDER(&m->lock_object, (opts & ~MTX_RECURSE) |
299 if (!_mtx_obtain_lock_fetch(m, &v, tid))
300 _mtx_lock_sleep(m, v, opts, file, line);
303 m, 0, 0, file, line);
304 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
306 WITNESS_LOCK(&m->lock_object, (opts & ~MTX_RECURSE) | LOP_EXCLUSIVE,
314 struct mtx *m;
316 m = mtxlock2mtx(c);
318 KASSERT(m->mtx_lock != MTX_DESTROYED,
320 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
321 ("mtx_unlock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
323 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
324 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
326 mtx_assert(m, MA_OWNED);
331 __mtx_unlock(m, curthread, opts, file, line);
340 struct mtx *m;
345 m = mtxlock2mtx(c);
347 KASSERT(m->mtx_lock != MTX_DESTROYED,
349 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
351 m->lock_object.lo_name, file, line));
352 if (mtx_owned(m))
353 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
356 m->lock_object.lo_name, file, line));
358 WITNESS_CHECKORDER(&m->lock_object, opts | LOP_NEWORDER | LOP_EXCLUSIVE,
364 if (!_mtx_obtain_lock_fetch(m, &v, tid))
365 _mtx_lock_spin(m, v, opts, file, line);
368 m, 0, 0, file, line);
370 __mtx_lock_spin(m, curthread, opts, file, line);
372 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
374 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
381 struct mtx *m;
386 m = mtxlock2mtx(c);
388 KASSERT(m->mtx_lock != MTX_DESTROYED,
390 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
392 m->lock_object.lo_name, file, line));
395 m->lock_object.lo_name, file, line));
396 if (__mtx_trylock_spin(m, curthread, opts, file, line)) {
397 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 1, file, line);
398 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
401 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, 0, file, line);
409 struct mtx *m;
411 m = mtxlock2mtx(c);
413 KASSERT(m->mtx_lock != MTX_DESTROYED,
415 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
417 m->lock_object.lo_name, file, line));
418 WITNESS_UNLOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
419 LOCK_LOG_LOCK("UNLOCK", &m->lock_object, opts, m->mtx_recurse, file,
421 mtx_assert(m, MA_OWNED);
423 __mtx_unlock_spin(m);
428 * Tries to acquire lock `m.' If this function is called on a mutex that is already owned, it will recursively acquire the lock.
432 _mtx_trylock_flags_int(struct mtx *m, int opts LOCK_FILE_LINE_ARG_DEF)
450 curthread, m->lock_object.lo_name, file, line));
451 KASSERT(m->mtx_lock != MTX_DESTROYED,
453 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
454 ("mtx_trylock() of spin mutex %s @ %s:%d", m->lock_object.lo_name,
461 if (_mtx_obtain_lock_fetch(m, &v, tid))
466 ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
468 m->mtx_recurse++;
469 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
479 LOCK_LOG_TRY("LOCK", &m->lock_object, opts, rval, file, line);
481 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE | LOP_TRYLOCK,
486 m, contested, waittime, file, line);
495 struct mtx *m;
497 m = mtxlock2mtx(c);
498 return (_mtx_trylock_flags_int(m, opts LOCK_FILE_LINE_ARG));
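
The trylock path above only recurses when the mutex is marked recursable (LO_RECURSABLE, set by initializing with MTX_RECURSE). A minimal usage sketch under that assumption; the lock name "example", example_mtx, and example_poll() are hypothetical and not part of this listing:

	#include <sys/param.h>
	#include <sys/kernel.h>
	#include <sys/errno.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx example_mtx;
	MTX_SYSINIT(example_mtx, &example_mtx, "example", MTX_DEF | MTX_RECURSE);

	static int
	example_poll(void)
	{
		/* mtx_trylock() never sleeps; nonzero means we now own the lock. */
		if (!mtx_trylock(&example_mtx))
			return (EBUSY);	/* owned by another thread */
		/* ... short critical section ... */
		mtx_unlock(&example_mtx);
		return (0);
	}

Because the mutex was initialized with MTX_RECURSE, a thread that already owns example_mtx may call mtx_trylock() again and will succeed by recursing, matching the LO_RECURSABLE check in the listed code.
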
517 struct mtx *m;
539 m = mtxlock2mtx(c);
544 if (_mtx_obtain_lock_fetch(m, &v, tid))
548 all_time -= lockstat_nsecs(&m->lock_object);
559 v = MTX_READ_VALUE(m);
562 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
565 m->lock_object.lo_name, file, line));
569 m->mtx_recurse++;
570 atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
571 if (LOCK_LOG_TEST(&m->lock_object, opts))
572 CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m);
588 lock_profile_obtain_lock_failed(&m->lock_object, false,
590 if (LOCK_LOG_TEST(&m->lock_object, opts))
593 m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
595 THREAD_CONTENDS_ON_LOCK(&m->lock_object);
599 if (_mtx_obtain_lock_fetch(m, &v, tid))
613 if (LOCK_LOG_TEST(&m->lock_object, 0))
616 __func__, m, owner);
620 m->lock_object.lo_name);
623 v = MTX_READ_VALUE(m);
633 ts = turnstile_trywait(&m->lock_object);
634 v = MTX_READ_VALUE(m);
667 !atomic_fcmpset_ptr(&m->mtx_lock, &v, v | MTX_CONTESTED)) {
674 mtx_assert(m, MA_NOTOWNED);
680 sleep_time -= lockstat_nsecs(&m->lock_object);
683 owner = mtx_owner(m);
685 MPASS(owner == mtx_owner(m));
688 sleep_time += lockstat_nsecs(&m->lock_object);
691 v = MTX_READ_VALUE(m);
693 THREAD_CONTENTION_DONE(&m->lock_object);
699 all_time += lockstat_nsecs(&m->lock_object);
701 LOCKSTAT_RECORD1(adaptive__block, m, sleep_time);
707 LOCKSTAT_RECORD1(adaptive__spin, m, all_time - sleep_time);
710 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, m, contested,
730 struct mtx *m;
745 m = mtxlock2mtx(c);
750 if (_mtx_obtain_lock_fetch(m, &v, tid))
754 spin_time -= lockstat_nsecs(&m->lock_object);
762 v = MTX_READ_VALUE(m);
765 m->mtx_recurse++;
772 if (LOCK_LOG_TEST(&m->lock_object, opts))
773 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
775 "spinning", "lockname:\"%s\"", m->lock_object.lo_name);
782 lock_profile_obtain_lock_failed(&m->lock_object, true, &contested, &waittime);
786 if (_mtx_obtain_lock_fetch(m, &v, tid))
796 _mtx_lock_indefinite_check(m, &lda);
798 v = MTX_READ_VALUE(m);
803 if (LOCK_LOG_TEST(&m->lock_object, opts))
804 CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);
813 spin_time += lockstat_nsecs(&m->lock_object);
815 LOCKSTAT_RECORD1(spin__spin, m, spin_time);
818 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m,
825 thread_lock_validate(struct mtx *m, int opts, const char *file, int line)
828 KASSERT(m->mtx_lock != MTX_DESTROYED,
830 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
832 m->lock_object.lo_name, file, line));
833 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) == 0,
835 m->lock_object.lo_name, file, line));
836 WITNESS_CHECKORDER(&m->lock_object,
840 #define thread_lock_validate(m, opts, file, line) do { } while (0)
852 struct mtx *m;
860 m = td->td_lock;
861 thread_lock_validate(m, 0, file, line);
862 if (__predict_false(m == &blocked_lock))
864 if (__predict_false(!_mtx_obtain_lock(m, tid)))
866 if (__predict_true(m == td->td_lock)) {
867 WITNESS_LOCK(&m->lock_object, LOP_EXCLUSIVE, file, line);
870 _mtx_release_lock_quick(m);
885 struct mtx *m;
930 m = td->td_lock;
931 thread_lock_validate(m, opts, file, line);
932 v = MTX_READ_VALUE(m);
935 if (_mtx_obtain_lock_fetch(m, &v, tid))
940 lock_profile_obtain_lock_failed(&m->lock_object, true,
948 _mtx_lock_indefinite_check(m, &lda);
950 if (m != td->td_lock) {
954 v = MTX_READ_VALUE(m);
958 if (m == td->td_lock)
960 _mtx_release_lock_quick(m);
962 LOCK_LOG_LOCK("LOCK", &m->lock_object, opts, m->mtx_recurse, file,
964 WITNESS_LOCK(&m->lock_object, opts | LOP_EXCLUSIVE, file, line);
971 spin_time += lockstat_nsecs(&m->lock_object);
973 LOCKSTAT_PROFILE_OBTAIN_SPIN_LOCK_SUCCESS(spin__acquire, m, contested,
977 LOCKSTAT_RECORD1(thread__spin, m, spin_time);
1042 struct mtx *m;
1050 m = mtxlock2mtx(c);
1053 v = MTX_READ_VALUE(m);
1056 if (--(m->mtx_recurse) == 0)
1057 atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
1058 if (LOCK_LOG_TEST(&m->lock_object, opts))
1059 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m);
1063 LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, m);
1064 if (v == tid && _mtx_release_lock(m, tid))
1071 turnstile_chain_lock(&m->lock_object);
1072 _mtx_release_lock_quick(m);
1073 ts = turnstile_lookup(&m->lock_object);
1075 panic("got NULL turnstile on mutex %p v %p", m, (void *)v);
1077 if (LOCK_LOG_TEST(&m->lock_object, opts))
1078 CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
1086 turnstile_chain_unlock(&m->lock_object);
1101 const struct mtx *m;
1106 m = mtxlock2mtx(c);
1112 if (!mtx_owned(m))
1114 m->lock_object.lo_name, file, line);
1115 if (mtx_recursed(m)) {
1118 m->lock_object.lo_name, file, line);
1121 m->lock_object.lo_name, file, line);
1125 if (mtx_owned(m))
1127 m->lock_object.lo_name, file, line);
1148 * Mutex initialization routine; initialize lock `m' of type contained in `opts' with options contained in `opts' and name `name.' The optional lock type `type' is used as a general lock category name for use with witness.
1156 struct mtx *m;
1160 m = mtxlock2mtx(c);
1164 ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
1166 &m->mtx_lock));
1188 lock_init(&m->lock_object, class, name, type, flags);
1190 m->mtx_lock = MTX_UNOWNED;
1191 m->mtx_recurse = 0;
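
The lines above are the internal initializer; callers normally reach it through mtx_init(9). A minimal initialization sketch, with foo_mtx and foo_attach() as hypothetical names:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx foo_mtx;

	static void
	foo_attach(void)
	{
		/*
		 * "foo" is the name WITNESS and DDB report for this lock; a
		 * NULL type defaults the witness category to the name.
		 * MTX_DEF requests an ordinary (sleep) mutex rather than a
		 * spin mutex.
		 */
		mtx_init(&foo_mtx, "foo", NULL, MTX_DEF);
	}
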
1195 * Remove lock `m' from all_mtx queue. We don't allow MTX_QUIET to be passed in as a flag here because if the corresponding mtx_init() was called with MTX_QUIET set, then it will already be set in the mutex's flags.
1203 struct mtx *m;
1205 m = mtxlock2mtx(c);
1207 if (!mtx_owned(m))
1208 MPASS(mtx_unowned(m));
1210 MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0);
1213 if (LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin) {
1214 lock_profile_release_lock(&m->lock_object, true);
1218 lock_profile_release_lock(&m->lock_object, false);
1222 WITNESS_UNLOCK(&m->lock_object, LOP_EXCLUSIVE, __FILE__,
1226 m->mtx_lock = MTX_DESTROYED;
1227 lock_destroy(&m->lock_object);
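
Per the assertions in the listed destroy path, mtx_destroy() may be called while the lock is unowned, or while it is held exactly once by the caller (in which case it is released), but never while recursed or contested. A teardown sketch continuing the hypothetical foo_mtx example above:

	static void
	foo_detach(void)
	{
		/*
		 * No other thread may be using or waiting on foo_mtx at this
		 * point; after this call the storage may be freed or reused.
		 */
		mtx_destroy(&foo_mtx);
	}
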
1258 _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
1266 td = mtx_owner(m);
1273 m, m->lock_object.lo_name, td, td->td_tid);
1275 witness_display_spinlock(&m->lock_object, td, printf);
1283 mtx_spin_wait_unlocked(struct mtx *m)
1287 KASSERT(m->mtx_lock != MTX_DESTROYED,
1288 ("%s() of destroyed mutex %p", __func__, m));
1289 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
1290 ("%s() of sleep mutex %p (%s)", __func__, m,
1291 m->lock_object.lo_name));
1292 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1293 m->lock_object.lo_name));
1297 while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
1302 _mtx_lock_indefinite_check(m, &lda);
1308 mtx_wait_unlocked(struct mtx *m)
1313 KASSERT(m->mtx_lock != MTX_DESTROYED,
1314 ("%s() of destroyed mutex %p", __func__, m));
1315 KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
1316 ("%s() not a sleep mutex %p (%s)", __func__, m,
1317 m->lock_object.lo_name));
1318 KASSERT(!mtx_owned(m), ("%s() waiting on myself on lock %p (%s)", __func__, m,
1319 m->lock_object.lo_name));
1322 v = atomic_load_acq_ptr(&m->mtx_lock);
1328 mtx_lock(m);
1329 mtx_unlock(m);
1341 const struct mtx *m;
1343 m = (const struct mtx *)lock;
1350 if (m->lock_object.lo_flags & LO_RECURSABLE)
1352 if (m->lock_object.lo_flags & LO_DUPOK)
1356 if (mtx_unowned(m))
1358 else if (mtx_destroyed(m))
1362 if (m->mtx_lock & MTX_CONTESTED)
1364 if (m->mtx_lock & MTX_RECURSED)
1368 if (!mtx_unowned(m) && !mtx_destroyed(m)) {
1369 td = mtx_owner(m);
1372 if (mtx_recursed(m))
1373 db_printf(" recursed: %d\n", m->mtx_recurse);