Lines Matching defs:mutex (definition cross-reference over FreeBSD libthr's thr_mutex.c; the leading number on each matched line below is its line number in that source file)

65 int	__pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
67 int _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count);
68 int _pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
69 int __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count);
70 int _pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
71 int _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count);
72 int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
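
The six prototypes above (source lines 67-72) are the FreeBSD-specific spin/yield tuning knobs; if I read the symbol maps right, they are exported through pthread_np.h under the _np-suffixed public names. A minimal usage sketch, assuming those public wrappers:

    #include <pthread.h>
    #include <pthread_np.h>     /* FreeBSD _np extensions */
    #include <stdio.h>

    int
    main(void)
    {
        pthread_mutex_t m;
        int spins;

        pthread_mutex_init(&m, NULL);
        /* Spin ~2000 times in userland before sleeping in the kernel. */
        pthread_mutex_setspinloops_np(&m, 2000);
        pthread_mutex_getspinloops_np(&m, &spins);
        printf("spin loops: %d\n", spins);
        pthread_mutex_destroy(&m);
        return (0);
    }
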
137 PANIC("mutex %p own %#x is not on list %p %p",
150 PANIC("mutex %p own %#x is on list %p %p",
159 "mutex %p own %#x is on robust linkage %p %p head %p phead %p",
275 mutex_init(pthread_mutex_t *mutex,
295 *mutex = pmutex;
300 init_static(struct pthread *thread, pthread_mutex_t *mutex)
306 if (*mutex == THR_MUTEX_INITIALIZER)
307 ret = mutex_init(mutex, &_pthread_mutexattr_default,
309 else if (*mutex == THR_ADAPTIVE_MUTEX_INITIALIZER)
310 ret = mutex_init(mutex, &_pthread_mutexattr_adaptive_default,
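
init_static() (source line 300) is the lazy-initialization path: a mutex word still holding THR_MUTEX_INITIALIZER or THR_ADAPTIVE_MUTEX_INITIALIZER gets a real mutex allocated on first use. From the application side these are just the static initializers; a sketch, assuming FreeBSD's adaptive variant from pthread.h:

    #include <pthread.h>

    /* No pthread_mutex_init() call: libthr allocates the real mutex
     * lazily, on the first lock (init_static()). */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    /* FreeBSD's adaptive-spin flavor of the same idea. */
    static pthread_mutex_t fast = PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP;

    static int counter, counter2;

    void
    bump(void)
    {
        pthread_mutex_lock(&lock);
        counter++;
        pthread_mutex_unlock(&lock);
    }

    void
    bump_fast(void)
    {
        pthread_mutex_lock(&fast);
        counter2++;
        pthread_mutex_unlock(&fast);
    }
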
346 * same process-shared mutex. We rely on kernel allocating
347 * zeroed offpage for the mutex, i.e. the
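
The comment fragment at source lines 346-347 concerns process-shared mutexes: the user-visible pthread_mutex_t word holds THR_PSHARED_PTR while the real structure lives in a kernel-allocated, zero-filled "offpage" looked up via __thr_pshared_offpage(). Applications never see any of that; they just initialize a PTHREAD_PROCESS_SHARED mutex in shared memory. A hedged sketch:

    #include <sys/mman.h>
    #include <pthread.h>

    /* Create a mutex in anonymous shared memory so related processes
     * (e.g. parent and a forked child) can share it; libthr keeps the
     * real state in an offpage keyed by this shared word. */
    static pthread_mutex_t *
    make_shared_mutex(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t *mp;

        mp = mmap(NULL, sizeof(*mp), PROT_READ | PROT_WRITE,
            MAP_ANON | MAP_SHARED, -1, 0);
        if (mp == MAP_FAILED)
            return (NULL);
        pthread_mutexattr_init(&attr);
        pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
        pthread_mutex_init(mp, &attr);
        pthread_mutexattr_destroy(&attr);
        return (mp);
    }
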
378 __Tthr_mutex_init(pthread_mutex_t * __restrict mutex,
394 return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
397 pmtx = __thr_pshared_offpage(__DECONST(void *, mutex), 1);
400 *mutex = THR_PSHARED_PTR;
407 _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
419 ret = mutex_init(mutex, &attr, calloc_cb);
421 (*mutex)->m_flags |= PMUTEX_FLAG_PRIVATE;
426 * Fix mutex ownership for child process.
428 * Process private mutex ownership is transmitted from the forking
431 * Process shared mutex should not be inherited because owner is
433 * the owned mutex list.
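
The comment block at source lines 426-433 documents the post-fork fixup: ownership of process-private mutexes passes from the forking thread to the child's only thread, while process-shared mutexes are dropped from the child's owned-mutex list because their owner may live in another process. The application-level pattern that leans on this guarantee is the usual atfork dance, sketched here:

    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void lock_m(void)   { pthread_mutex_lock(&m); }
    static void unlock_m(void) { pthread_mutex_unlock(&m); }

    int
    main(void)
    {
        /* Hold the private mutex across fork(); the child inherits
         * ownership, so both sides may legally unlock afterwards. */
        pthread_atfork(lock_m, unlock_m, unlock_m);
        pid_t pid = fork();
        /* ... both processes continue with m unlocked ... */
        return (pid == -1);
    }
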
462 _thr_mutex_destroy(pthread_mutex_t *mutex)
467 m = *mutex;
474 m1 = __thr_pshared_offpage(mutex, 0);
481 __thr_pshared_destroy(mutex);
483 *mutex = THR_MUTEX_DESTROYED;
490 *mutex = THR_MUTEX_DESTROYED;
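
_thr_mutex_destroy() (source line 462) is the pthread_mutex_destroy() entry point: it refuses to destroy a busy mutex and then poisons the user word with THR_MUTEX_DESTROYED so that later operations fail cleanly. On libthr that yields the behavior sketched below (POSIX merely leaves these misuses undefined):

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    int
    main(void)
    {
        pthread_mutex_t m;

        pthread_mutex_init(&m, NULL);
        pthread_mutex_lock(&m);
        assert(pthread_mutex_destroy(&m) == EBUSY); /* still locked */
        pthread_mutex_unlock(&m);
        assert(pthread_mutex_destroy(&m) == 0);
        /* The word is now THR_MUTEX_DESTROYED; libthr reports EINVAL. */
        assert(pthread_mutex_lock(&m) == EINVAL);
        return (0);
    }
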
590 check_and_init_mutex(pthread_mutex_t *mutex, struct pthread_mutex **m)
594 *m = *mutex;
597 *m = __thr_pshared_offpage(mutex, 0);
606 ret = init_static(_get_curthread(), mutex);
608 *m = *mutex;
615 __Tthr_mutex_trylock(pthread_mutex_t *mutex)
623 ret = check_and_init_mutex(mutex, &m);
660 * that if the application requests this mutex type then
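
__Tthr_mutex_trylock() (source line 615) is the pthread_mutex_trylock() entry point: after the lazy-init check it attempts the acquisition without ever blocking. Typical non-blocking use:

    #include <errno.h>
    #include <pthread.h>

    /* Returns 1 if the work was done, 0 if the lock was contended. */
    int
    try_do_work(pthread_mutex_t *mp)
    {
        if (pthread_mutex_trylock(mp) == EBUSY)
            return (0);       /* busy: skip rather than block */
        /* ... critical section ... */
        pthread_mutex_unlock(mp);
        return (1);
    }
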
745 __Tthr_mutex_lock(pthread_mutex_t *mutex)
751 ret = check_and_init_mutex(mutex, &m);
758 __pthread_mutex_timedlock(pthread_mutex_t * __restrict mutex,
765 ret = check_and_init_mutex(mutex, &m);
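
__pthread_mutex_timedlock() (declared at source line 65, defined at 758) takes an absolute CLOCK_REALTIME deadline, not a relative interval. A small helper:

    #include <pthread.h>
    #include <time.h>

    /* Wait at most `secs` seconds; the timespec is an absolute deadline. */
    int
    lock_with_timeout(pthread_mutex_t *mp, time_t secs)
    {
        struct timespec abstime;

        clock_gettime(CLOCK_REALTIME, &abstime);
        abstime.tv_sec += secs;
        return (pthread_mutex_timedlock(mp, &abstime)); /* 0 or ETIMEDOUT */
    }
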
772 _thr_mutex_unlock(pthread_mutex_t *mutex)
776 if (*mutex == THR_PSHARED_PTR) {
777 mp = __thr_pshared_offpage(mutex, 0);
782 mp = *mutex;
803 * Clear the count in case this is a recursive mutex.
833 * Clear the count in case this is a recursive mutex.
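
The two "clear the count" comments (source lines 803 and 833) sit in the unlock paths: m_count is the recursion depth of a PTHREAD_MUTEX_RECURSIVE mutex, and only the unlock that balances the first lock actually releases it:

    #include <pthread.h>

    int
    main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        pthread_mutex_lock(&m);   /* first acquisition */
        pthread_mutex_lock(&m);   /* same owner: recursion, no deadlock */
        pthread_mutex_unlock(&m); /* still held by this thread */
        pthread_mutex_unlock(&m); /* really released here */

        pthread_mutex_destroy(&m);
        return (0);
    }
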
876 /* Trap invalid mutex types; */
913 * What SS2 define as a 'normal' mutex. Intentionally
945 /* Trap invalid mutex types; */
969 * Check if the running thread is not the owner of the mutex.
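
Source lines 876-969 are the type dispatch in the lock/unlock slow paths: unknown types are trapped with EINVAL, PTHREAD_MUTEX_NORMAL (the SS2/SUSv2 "normal" mutex) deliberately self-deadlocks on relock, and the ownership check rejects unlocks by non-owners. The error-checking type makes both misuses visible:

    #include <assert.h>
    #include <errno.h>
    #include <pthread.h>

    int
    main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        pthread_mutex_lock(&m);
        assert(pthread_mutex_lock(&m) == EDEADLK);  /* relock caught */
        pthread_mutex_unlock(&m);
        assert(pthread_mutex_unlock(&m) == EPERM);  /* not the owner */

        pthread_mutex_destroy(&m);
        return (0);
    }
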
1006 _pthread_mutex_getprioceiling(const pthread_mutex_t * __restrict mutex,
1011 if (*mutex == THR_PSHARED_PTR) {
1012 m = __thr_pshared_offpage(__DECONST(void *, mutex), 0);
1017 m = *mutex;
1028 _pthread_mutex_setprioceiling(pthread_mutex_t * __restrict mutex,
1036 if (*mutex == THR_PSHARED_PTR) {
1037 m = __thr_pshared_offpage(mutex, 0);
1042 m = *mutex;
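
The prioceiling pair (source lines 1006-1042) only makes sense for mutexes using the PTHREAD_PRIO_PROTECT protocol; anything else gets EINVAL. A sketch (the ceiling values here are arbitrary and must fall within the scheduler's priority range):

    #include <pthread.h>
    #include <stdio.h>

    int
    main(void)
    {
        pthread_mutexattr_t attr;
        pthread_mutex_t m;
        int old, ceil;

        pthread_mutexattr_init(&attr);
        /* Ceilings require the priority-protect protocol. */
        pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
        pthread_mutexattr_setprioceiling(&attr, 10);
        pthread_mutex_init(&m, &attr);
        pthread_mutexattr_destroy(&attr);

        pthread_mutex_setprioceiling(&m, 20, &old); /* old == 10 */
        pthread_mutex_getprioceiling(&m, &ceil);    /* ceil == 20 */
        printf("old %d new %d\n", old, ceil);

        pthread_mutex_destroy(&m);
        return (0);
    }
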
1095 _pthread_mutex_getspinloops_np(pthread_mutex_t *mutex, int *count)
1100 ret = check_and_init_mutex(mutex, &m);
1107 __pthread_mutex_setspinloops_np(pthread_mutex_t *mutex, int count)
1112 ret = check_and_init_mutex(mutex, &m);
1119 _pthread_mutex_getyieldloops_np(pthread_mutex_t *mutex, int *count)
1124 ret = check_and_init_mutex(mutex, &m);
1131 __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count)
1136 ret = check_and_init_mutex(mutex, &m);
1143 _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
1147 if (*mutex == THR_PSHARED_PTR) {
1148 m = __thr_pshared_offpage(mutex, 0);
1153 m = *mutex;
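
_pthread_mutex_isowned_np() (source line 1143) backs FreeBSD's pthread_mutex_isowned_np(3), a non-portable "does the calling thread hold this?" predicate that is mostly useful in assertions:

    #include <assert.h>
    #include <pthread.h>
    #include <pthread_np.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    void
    requires_m_held(void)
    {
        /* Non-zero only when the caller is the current owner. */
        assert(pthread_mutex_isowned_np(&m));
        /* ... code that relies on m being held ... */
    }
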
1175 _Tthr_mutex_consistent(pthread_mutex_t *mutex)
1180 if (*mutex == THR_PSHARED_PTR) {
1181 m = __thr_pshared_offpage(mutex, 0);
1186 m = *mutex;
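
_Tthr_mutex_consistent() (source line 1175) implements pthread_mutex_consistent(), the recovery half of robust mutexes; it succeeds only on a robust mutex the caller holds in the EOWNERDEAD state. (The PANIC checks back at source lines 137-159 guard the per-thread robust list that makes EOWNERDEAD delivery possible.) A sketch, assuming the mutex was initialized with pthread_mutexattr_setrobust(..., PTHREAD_MUTEX_ROBUST):

    #include <errno.h>
    #include <pthread.h>

    /* Lock a robust mutex, repairing its state if the previous
     * owner died while holding it. */
    int
    robust_lock(pthread_mutex_t *mp)
    {
        int ret;

        ret = pthread_mutex_lock(mp);
        if (ret == EOWNERDEAD) {
            /* ... restore the data the dead owner may have left
             * half-updated ... */
            ret = pthread_mutex_consistent(mp);
        }
        return (ret);
    }
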