Lines matching full:m (per-line search hits from the libthr mutex implementation; each entry is the source line number followed by the matching line)
81 static int mutex_qidx(struct pthread_mutex *m);
82 static bool is_robust_mutex(struct pthread_mutex *m);
83 static bool is_pshared_mutex(struct pthread_mutex *m);
120 mutex_init_link(struct pthread_mutex *m __unused)
124 m->m_qe.tqe_prev = NULL;
125 m->m_qe.tqe_next = NULL;
126 m->m_pqe.tqe_prev = NULL;
127 m->m_pqe.tqe_next = NULL;
132 mutex_assert_is_owned(struct pthread_mutex *m __unused)
136 if (__predict_false(m->m_qe.tqe_prev == NULL))
138 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
144 struct pthread_mutex *m __unused)
148 if (__predict_false(m->m_qe.tqe_prev != NULL ||
149 m->m_qe.tqe_next != NULL))
151 m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
152 if (__predict_false(is_robust_mutex(m) &&
153 (m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
154 (is_pshared_mutex(m) && curthread->robust_list ==
155 (uintptr_t)&m->m_lock) ||
156 (!is_pshared_mutex(m) && curthread->priv_robust_list ==
157 (uintptr_t)&m->m_lock))))
160 m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
161 m->m_rb_prev, (void *)curthread->robust_list,
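
The mutex_init_link() and mutex_assert_*_owned() fragments above rely on a simple convention: a mutex that is not on any owner queue has NULL TAILQ link pointers, so queue membership can be asserted without walking the queue. Below is a minimal, compilable sketch of that convention; only the m_qe field name is borrowed from the listing, the struct and helpers are stand-ins.

/*
 * Sketch (not the libthr code): an intrusive TAILQ entry whose link
 * pointers are NULLed while the object is off-queue, so queue
 * membership can be checked in O(1).
 */
#include <assert.h>
#include <stddef.h>
#include <sys/queue.h>

struct fake_mutex {
    int id;
    TAILQ_ENTRY(fake_mutex) m_qe;   /* owner-queue linkage */
};

TAILQ_HEAD(mutex_queue, fake_mutex);

static void
link_init(struct fake_mutex *m)
{
    /* Off-queue marker: both link pointers NULL. */
    m->m_qe.tqe_prev = NULL;
    m->m_qe.tqe_next = NULL;
}

static int
is_linked(const struct fake_mutex *m)
{
    /* tqe_prev is never NULL for an element that is on a queue. */
    return (m->m_qe.tqe_prev != NULL);
}

int
main(void)
{
    struct mutex_queue q = TAILQ_HEAD_INITIALIZER(q);
    struct fake_mutex m = { .id = 1 };

    link_init(&m);
    assert(!is_linked(&m));
    TAILQ_INSERT_TAIL(&q, &m, m_qe);
    assert(is_linked(&m));
    TAILQ_REMOVE(&q, &m, m_qe);
    link_init(&m);              /* restore the off-queue marker */
    assert(!is_linked(&m));
    return (0);
}
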
167 is_pshared_mutex(struct pthread_mutex *m)
170 return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
174 is_robust_mutex(struct pthread_mutex *m)
177 return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);
181 _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
188 if (!is_robust_mutex(m))
192 curthread->inact_mtx = (uintptr_t)&m->m_lock;
197 _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)
201 if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
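
_mutex_enter_robust() and _mutex_leave_robust() above bracket robust-lock operations by publishing the lock's address in curthread->inact_mtx and checking/clearing it afterwards, so an interrupted operation can later be identified. A reduced sketch of that bracketing pattern follows; the thread-local slot and lock type are stand-ins, and the real consumer of the published address (robust-mutex cleanup) is not modeled.

/*
 * Sketch only: publish the address of the robust lock being operated on
 * for the duration of the operation, then check and clear it.
 */
#include <assert.h>
#include <stdint.h>

struct fake_umutex { uint32_t owner; };

static _Thread_local uintptr_t inact_mtx;   /* 0 = nothing in flight */

static void
robust_enter(struct fake_umutex *lk)
{
    assert(inact_mtx == 0);     /* these operations do not nest */
    inact_mtx = (uintptr_t)lk;
}

static void
robust_leave(struct fake_umutex *lk)
{
    assert(inact_mtx == (uintptr_t)lk);
    inact_mtx = 0;
}

int
main(void)
{
    struct fake_umutex lk = { 0 };

    robust_enter(&lk);
    /* ... the lock-word update would happen here ... */
    robust_leave(&lk);
    return (0);
}
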
320 set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
324 m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
326 m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
328 m->m_lock.m_ceilings[1] = -1;
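
set_inherited_priority() refreshes the released mutex's second ceiling slot (m_ceilings[1]) from the last priority-protected mutex still on the owner's queue, falling back to -1 when that queue is empty. A small sketch of the same tail-of-queue lookup, with stand-in types:

/*
 * Sketch: the fallback ceiling comes from the tail of the owner's
 * priority-protected queue, or -1 if nothing else is held.
 */
#include <stddef.h>
#include <sys/queue.h>

struct pp_mutex {
    int ceiling;
    TAILQ_ENTRY(pp_mutex) entry;
};

TAILQ_HEAD(pp_queue, pp_mutex);

static int
inherited_ceiling(struct pp_queue *q)
{
    struct pp_mutex *last = TAILQ_LAST(q, pp_queue);

    return (last != NULL ? last->ceiling : -1);
}

int
main(void)
{
    struct pp_queue q = TAILQ_HEAD_INITIALIZER(q);
    struct pp_mutex a = { .ceiling = 10 }, b = { .ceiling = 20 };

    TAILQ_INSERT_TAIL(&q, &a, entry);
    TAILQ_INSERT_TAIL(&q, &b, entry);
    (void)inherited_ceiling(&q);    /* 20: b is still held */
    TAILQ_REMOVE(&q, &b, entry);
    (void)inherited_ceiling(&q);    /* 10: only a remains */
    return (0);
}
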
439 struct pthread_mutex *m;
442 TAILQ_FOREACH(m, qp, m_pqe) {
443 TAILQ_INSERT_TAIL(q, m, m_qe);
444 m->m_lock.m_owner = TID(curthread) | bit;
464 pthread_mutex_t m, m1;
467 m = *mutex;
468 if (m < THR_MUTEX_DESTROYED) {
470 } else if (m == THR_MUTEX_DESTROYED) {
473 if (m == THR_PSHARED_PTR) {
486 if (PMUTEX_OWNER_ID(m) != 0 &&
487 (uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
491 mutex_assert_not_owned(_get_curthread(), m);
492 __thr_free(m);
501 mutex_qidx(struct pthread_mutex *m)
504 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
506 return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
522 enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
531 mutex_assert_not_owned(curthread, m);
532 qidx = mutex_qidx(m);
533 TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
534 if (!is_pshared_mutex(m))
535 TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
536 if (is_robust_mutex(m)) {
537 rl = is_pshared_mutex(m) ? &curthread->robust_list :
539 m->m_rb_prev = NULL;
543 m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
544 m1->m_rb_prev = m;
547 m->m_lock.m_rb_lnk = 0;
549 *rl = (uintptr_t)&m->m_lock;
554 dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
559 mutex_assert_is_owned(m);
560 qidx = mutex_qidx(m);
561 if (is_robust_mutex(m)) {
562 mp = m->m_rb_prev;
564 if (is_pshared_mutex(m)) {
565 curthread->robust_list = m->m_lock.m_rb_lnk;
568 m->m_lock.m_rb_lnk;
571 mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
573 if (m->m_lock.m_rb_lnk != 0) {
574 mn = __containerof((void *)m->m_lock.m_rb_lnk,
576 mn->m_rb_prev = m->m_rb_prev;
578 m->m_lock.m_rb_lnk = 0;
579 m->m_rb_prev = NULL;
581 TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
582 if (!is_pshared_mutex(m))
583 TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
584 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) != 0)
585 set_inherited_priority(curthread, m);
586 mutex_init_link(m);
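
The enqueue_mutex()/dequeue_mutex() fragments show the robust-list shape: owned robust mutexes are chained through a uintptr_t forward link embedded in the lock word (m_rb_lnk), with a userspace-only back pointer (m_rb_prev) for O(1) removal, and the head kept in per-thread state (robust_list or priv_robust_list). Below is a compilable sketch of that head-insert/unlink scheme; the struct layout, the CONTAINER_OF macro, and the single global head are simplified assumptions standing in for libthr's definitions.

/*
 * Sketch of a robust list: singly linked through the lock word's
 * forward link, with a struct-level back pointer for O(1) unlink.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CONTAINER_OF(p, type, field) \
    ((type *)((char *)(p) - offsetof(type, field)))

struct fake_lock {
    uintptr_t rb_lnk;               /* address of the next lock, or 0 */
};

struct fake_mutex {
    struct fake_lock m_lock;
    struct fake_mutex *m_rb_prev;   /* userspace-only back pointer */
};

static uintptr_t robust_head;       /* stands in for the per-thread head */

static void
robust_insert_head(struct fake_mutex *m)
{
    struct fake_mutex *first;

    m->m_rb_prev = NULL;
    if (robust_head != 0) {
        first = CONTAINER_OF((void *)robust_head, struct fake_mutex, m_lock);
        m->m_lock.rb_lnk = robust_head;
        first->m_rb_prev = m;
    } else {
        m->m_lock.rb_lnk = 0;
    }
    robust_head = (uintptr_t)&m->m_lock;
}

static void
robust_remove(struct fake_mutex *m)
{
    struct fake_mutex *mn;

    if (m->m_rb_prev == NULL)
        robust_head = m->m_lock.rb_lnk;     /* unlinking the head */
    else
        m->m_rb_prev->m_lock.rb_lnk = m->m_lock.rb_lnk;
    if (m->m_lock.rb_lnk != 0) {
        mn = CONTAINER_OF((void *)m->m_lock.rb_lnk, struct fake_mutex, m_lock);
        mn->m_rb_prev = m->m_rb_prev;
    }
    m->m_lock.rb_lnk = 0;
    m->m_rb_prev = NULL;
}

int
main(void)
{
    struct fake_mutex a = {{0}, NULL}, b = {{0}, NULL};

    robust_insert_head(&a);
    robust_insert_head(&b);     /* list is now: b -> a */
    robust_remove(&a);
    robust_remove(&b);
    assert(robust_head == 0);
    return (0);
}
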
590 check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
594 *m = *mutex;
596 if (__predict_false(*m == THR_PSHARED_PTR)) {
597 *m = __thr_pshared_offpage(mutex, 0);
598 if (*m == NULL)
601 shared_mutex_init(*m, NULL);
602 } else if (__predict_false(*m <= THR_MUTEX_DESTROYED)) {
603 if (*m == THR_MUTEX_DESTROYED) {
608 *m = *mutex;
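
check_and_init_mutex() dispatches on sentinel values stored in the user-visible mutex word: THR_PSHARED_PTR means the real object lives off-page and is looked up via __thr_pshared_offpage(), values at or below THR_MUTEX_DESTROYED are static-initializer or destroyed markers, and anything else is already a live pointer. The sketch below mirrors only that dispatch; the sentinel values, the resolver, and the error handling are illustrative assumptions, and lazy allocation for static initializers is elided.

/*
 * Sketch of sentinel-pointer dispatch.  The <= comparison mirrors the
 * listing's style of treating sentinels as a small value range.
 */
#include <errno.h>
#include <stdio.h>

struct my_mutex { int dummy; };

#define MY_MUTEX_INITIALIZER ((struct my_mutex *)0)
#define MY_MUTEX_DESTROYED   ((struct my_mutex *)1)
#define MY_PSHARED_PTR       ((struct my_mutex *)2)

static struct my_mutex *
resolve_pshared(struct my_mutex **slot)
{
    /* Stand-in for an off-page lookup; always fails in this sketch. */
    (void)slot;
    return (NULL);
}

static int
check_and_init(struct my_mutex **slot, struct my_mutex **out)
{
    *out = *slot;
    if (*out == MY_PSHARED_PTR) {
        *out = resolve_pshared(slot);
        if (*out == NULL)
            return (EINVAL);
    } else if (*out <= MY_MUTEX_DESTROYED) {    /* sentinel range */
        if (*out == MY_MUTEX_DESTROYED)
            return (EINVAL);
        /* Static-initializer sentinel: lazy allocation elided here. */
        return (ENOMEM);
    }
    return (0);
}

int
main(void)
{
    struct my_mutex *slot = MY_MUTEX_DESTROYED, *m;

    printf("%d\n", check_and_init(&slot, &m));  /* prints EINVAL's value */
    return (0);
}
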
618 struct pthread_mutex *m;
623 ret = check_and_init_mutex(mutex, &m);
628 if (m->m_flags & PMUTEX_FLAG_PRIVATE)
630 robust = _mutex_enter_robust(curthread, m);
631 ret = _thr_umutex_trylock(&m->m_lock, id);
633 enqueue_mutex(curthread, m, ret);
635 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
636 } else if (PMUTEX_OWNER_ID(m) == id) {
637 ret = mutex_self_trylock(m);
640 _mutex_leave_robust(curthread, m);
642 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
648 mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
655 if (PMUTEX_OWNER_ID(m) == id)
656 return (mutex_self_lock(m, abstime));
664 if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
671 count = m->m_spinloops;
673 owner = m->m_lock.m_owner;
675 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
685 count = m->m_yieldloops;
688 owner = m->m_lock.m_owner;
690 if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
700 ret = __thr_umutex_lock(&m->m_lock, id);
705 ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
708 enqueue_mutex(curthread, m, ret);
710 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
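
mutex_lock_sleep() is an adaptive acquisition path: after the self-deadlock check and the early exit for priority-protected/robust locks, it spins m_spinloops times attempting a CAS on the owner word, retries m_yieldloops times with a yield in between, and only then blocks in the kernel. A simplified sketch of that three-phase pattern using C11 atomics and sched_yield() follows; the contested-bit handling, timeouts, and the real kernel wait are omitted, and the blocking phase is stubbed with a yield loop.

/*
 * Sketch of a three-phase adaptive lock: bounded busy-spin, bounded
 * yield-and-retry, then block (stubbed here).
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define UNOWNED 0u

static int
try_cas(_Atomic uint32_t *owner, uint32_t id)
{
    uint32_t exp = UNOWNED;

    return (atomic_compare_exchange_weak_explicit(owner, &exp, id,
        memory_order_acquire, memory_order_relaxed));
}

static int
blocking_lock(_Atomic uint32_t *owner, uint32_t id)
{
    /* Stand-in for the kernel sleep; just yields until the CAS wins. */
    while (!try_cas(owner, id))
        sched_yield();
    return (0);
}

static int
adaptive_lock(_Atomic uint32_t *owner, uint32_t id, int spinloops,
    int yieldloops)
{
    int count;

    /* Phase 1: bounded busy-wait retrying the CAS. */
    for (count = spinloops; count > 0; count--) {
        if (try_cas(owner, id))
            return (0);
        /* a pause/CPU_SPINWAIT hint would go here */
    }

    /* Phase 2: give up the CPU between attempts. */
    for (count = yieldloops; count > 0; count--) {
        sched_yield();
        if (try_cas(owner, id))
            return (0);
    }

    /* Phase 3: sleep until the lock can be taken. */
    return (blocking_lock(owner, id));
}

int
main(void)
{
    _Atomic uint32_t owner = UNOWNED;

    adaptive_lock(&owner, 1, 200, 8);                   /* acquires */
    atomic_store_explicit(&owner, UNOWNED, memory_order_release);
    return (0);
}
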
716 mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
724 if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
727 robust = _mutex_enter_robust(curthread, m);
728 ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
730 enqueue_mutex(curthread, m, ret);
732 m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
734 ret = mutex_lock_sleep(curthread, m, abstime);
737 _mutex_leave_robust(curthread, m);
739 (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
747 struct pthread_mutex *m;
751 ret = check_and_init_mutex(mutex, &m);
753 ret = mutex_lock_common(m, NULL, false, false);
761 struct pthread_mutex *m;
765 ret = check_and_init_mutex(mutex, &m);
767 ret = mutex_lock_common(m, abstime, false, false);
788 _mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)
792 error = mutex_lock_common(m, NULL, true, rb_onlist);
794 m->m_count = count;
799 _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
805 *count = m->m_count;
806 m->m_count = 0;
807 (void)mutex_unlock_common(m, true, defer);
812 _mutex_cv_attach(struct pthread_mutex *m, int count)
817 enqueue_mutex(curthread, m, 0);
818 m->m_count = count;
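
The _mutex_cv_*() fragments stash and restore the recursion count around a condition-variable wait: the wait must drop the mutex completely, so m_count is saved and zeroed before the full unlock and written back once the lock is reacquired (or re-attached to the owner's queue). A toy sketch of that save/restore, with stand-in types and lock operations:

/*
 * Sketch: save the recursion depth, release fully, and restore the
 * depth after reacquisition.
 */
struct fake_mutex {
    int m_count;    /* recursion depth beyond the lock itself */
    int m_locked;
};

static void fake_unlock(struct fake_mutex *m) { m->m_locked = 0; }
static void fake_lock(struct fake_mutex *m)   { m->m_locked = 1; }

static void
cv_unlock_save(struct fake_mutex *m, int *count)
{
    *count = m->m_count;    /* remember the recursion depth */
    m->m_count = 0;
    fake_unlock(m);         /* release fully, whatever the depth was */
}

static void
cv_lock_restore(struct fake_mutex *m, int count)
{
    fake_lock(m);           /* reacquire once */
    m->m_count = count;     /* and restore the recorded depth */
}

int
main(void)
{
    struct fake_mutex m = { .m_count = 2, .m_locked = 1 };
    int saved;

    cv_unlock_save(&m, &saved);
    /* ... the condition-variable sleep would happen here ... */
    cv_lock_restore(&m, saved);
    return (0);
}
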
855 mutex_self_trylock(struct pthread_mutex *m)
859 switch (PMUTEX_TYPE(m->m_flags)) {
868 if (m->m_count + 1 > 0) {
869 m->m_count++;
884 mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
889 switch (PMUTEX_TYPE(m->m_flags)) {
937 if (m->m_count + 1 > 0) {
938 m->m_count++;
953 mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)
959 if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
960 if (m == THR_MUTEX_DESTROYED)
971 if (__predict_false(PMUTEX_OWNER_ID(m) != id))
975 private = (m->m_flags & PMUTEX_FLAG_PRIVATE) != 0;
976 if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
977 PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
978 m->m_count--;
980 if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
982 m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
986 robust = _mutex_enter_robust(curthread, m);
987 dequeue_mutex(curthread, m);
988 error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
998 _mutex_leave_robust(curthread, m);
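
mutex_unlock_common() orders its checks as: reject destroyed/sentinel handles, reject callers that do not own the lock, unwind one recursion level if any remains, and only then dequeue the mutex and release the lock word. A compact sketch of that decision tree with stand-in types; deferred-wakeup and robust handling are omitted.

/*
 * Sketch of the unlock decision tree: sentinel check, ownership check,
 * recursive decrement, then full release.
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>

struct fake_mutex {
    uint32_t owner_id;      /* 0 = unowned */
    int      recursive;
    int      m_count;       /* extra recursion depth */
};

static int
fake_unlock_common(struct fake_mutex *m, uint32_t self)
{
    if (m == NULL)
        return (EINVAL);            /* destroyed or sentinel handle */
    if (m->owner_id != self)
        return (EPERM);             /* caller does not own the lock */
    if (m->recursive && m->m_count > 0) {
        m->m_count--;               /* still held by the caller */
        return (0);
    }
    /* Full release: the owner-queue dequeue would happen here. */
    m->owner_id = 0;
    return (0);
}

int
main(void)
{
    struct fake_mutex m = { .owner_id = 7, .recursive = 1, .m_count = 1 };

    assert(fake_unlock_common(&m, 8) == EPERM); /* wrong thread */
    assert(fake_unlock_common(&m, 7) == 0);     /* drops one level */
    assert(m.owner_id == 7 && m.m_count == 0);
    assert(fake_unlock_common(&m, 7) == 0);     /* full release */
    assert(m.owner_id == 0);
    return (0);
}
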
1009 struct pthread_mutex *m;
1012 m = __thr_pshared_offpage(__DECONST(void *, mutex), 0);
1013 if (m == NULL)
1015 shared_mutex_init(m, NULL);
1017 m = *mutex;
1018 if (m <= THR_MUTEX_DESTROYED)
1021 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
1023 *prioceiling = m->m_lock.m_ceilings[0];
1032 struct pthread_mutex *m, *m1, *m2;
1037 m = __thr_pshared_offpage(mutex, 0);
1038 if (m == NULL)
1040 shared_mutex_init(m, NULL);
1042 m = *mutex;
1043 if (m <= THR_MUTEX_DESTROYED)
1046 if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
1049 ret = __thr_umutex_set_ceiling(&m->m_lock, ceiling, old_ceiling);
1054 if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
1055 mutex_assert_is_owned(m);
1056 m1 = TAILQ_PREV(m, mutex_queue, m_qe);
1057 m2 = TAILQ_NEXT(m, m_qe);
1060 qidx = mutex_qidx(m);
1063 TAILQ_REMOVE(q, m, m_qe);
1064 if (!is_pshared_mutex(m))
1065 TAILQ_REMOVE(qp, m, m_pqe);
1068 TAILQ_INSERT_BEFORE(m2, m, m_qe);
1069 if (!is_pshared_mutex(m)) {
1077 m, m_pqe);
1080 m, m_pqe);
1086 TAILQ_INSERT_TAIL(q, m, m_qe);
1087 if (!is_pshared_mutex(m))
1088 TAILQ_INSERT_TAIL(qp, m, m_pqe);
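
The pthread_mutex_setprioceiling() fragment suggests the owner's priority-protected queue is kept ordered by ceiling: after a successful ceiling change the held mutex is pulled out and reinserted before the first entry with a higher ceiling, or at the tail. A sketch of that remove-and-reinsert step with stand-in types; the parallel private queue (m_pqe) is not modeled.

/*
 * Sketch: reinsert a held mutex so the queue stays ordered by ceiling.
 */
#include <stddef.h>
#include <sys/queue.h>

struct pp_mutex {
    int ceiling;
    TAILQ_ENTRY(pp_mutex) entry;
};

TAILQ_HEAD(pp_queue, pp_mutex);

static void
requeue_by_ceiling(struct pp_queue *q, struct pp_mutex *m, int new_ceiling)
{
    struct pp_mutex *it;

    m->ceiling = new_ceiling;
    TAILQ_REMOVE(q, m, entry);
    TAILQ_FOREACH(it, q, entry) {
        if (it->ceiling > m->ceiling) {
            TAILQ_INSERT_BEFORE(it, m, entry);
            return;
        }
    }
    TAILQ_INSERT_TAIL(q, m, entry);
}

int
main(void)
{
    struct pp_queue q = TAILQ_HEAD_INITIALIZER(q);
    struct pp_mutex a = { .ceiling = 10 }, b = { .ceiling = 30 };

    TAILQ_INSERT_TAIL(&q, &a, entry);
    TAILQ_INSERT_TAIL(&q, &b, entry);
    requeue_by_ceiling(&q, &a, 40);     /* a now sorts after b */
    return (0);
}
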
1097 struct pthread_mutex *m;
1100 ret = check_and_init_mutex(mutex, &m);
1102 *count = m->m_spinloops;
1109 struct pthread_mutex *m;
1112 ret = check_and_init_mutex(mutex, &m);
1114 m->m_spinloops = count;
1121 struct pthread_mutex *m;
1124 ret = check_and_init_mutex(mutex, &m);
1126 *count = m->m_yieldloops;
1133 struct pthread_mutex *m;
1136 ret = check_and_init_mutex(mutex, &m);
1138 m->m_yieldloops = count;
1145 struct pthread_mutex *m;
1148 m = __thr_pshared_offpage(mutex, 0);
1149 if (m == NULL)
1151 shared_mutex_init(m, NULL);
1153 m = *mutex;
1154 if (m <= THR_MUTEX_DESTROYED)
1157 return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
1177 struct pthread_mutex *m;
1181 m = __thr_pshared_offpage(mutex, 0);
1182 if (m == NULL)
1184 shared_mutex_init(m, NULL);
1186 m = *mutex;
1187 if (m <= THR_MUTEX_DESTROYED)
1191 if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
1194 if (PMUTEX_OWNER_ID(m) != TID(curthread))
1196 m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;