/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include "namespace.h"
#include <machine/tls.h>

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>
#include <pthread.h>
#include "un-namespace.h"

#include "thr_private.h"

#if defined(_PTHREADS_INVARIANTS)
#define MUTEX_INIT_LINK(m) 		do {		\
	(m)->m_qe.tqe_prev = NULL;			\
	(m)->m_qe.tqe_next = NULL;			\
} while (0)
#define MUTEX_ASSERT_IS_OWNED(m)	do {		\
	if ((m)->m_qe.tqe_prev == NULL)			\
		PANIC("mutex is not on list");		\
} while (0)
#define MUTEX_ASSERT_NOT_OWNED(m)	do {		\
	if (((m)->m_qe.tqe_prev != NULL) ||		\
	    ((m)->m_qe.tqe_next != NULL))		\
		PANIC("mutex is on list");		\
} while (0)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)	do {		\
	THR_ASSERT(((thr)->sflags & THR_FLAGS_IN_SYNCQ) == 0, \
	    "thread in syncq when it shouldn't be.");	\
} while (0)
#else
#define MUTEX_INIT_LINK(m)
#define MUTEX_ASSERT_IS_OWNED(m)
#define MUTEX_ASSERT_NOT_OWNED(m)
#define	THR_ASSERT_NOT_IN_SYNCQ(thr)
#endif

#define THR_IN_MUTEXQ(thr)	(((thr)->sflags & THR_FLAGS_IN_SYNCQ) != 0)
#define	MUTEX_DESTROY(m) do {		\
	free(m);			\
} while (0)

umtx_t	_mutex_static_lock;

/*
 * Prototypes
 */
static int	mutex_self_trylock(pthread_mutex_t);
static int	mutex_self_lock(pthread_mutex_t,
			const struct timespec *abstime);
static int	mutex_unlock_common(pthread_mutex_t *);

int __pthread_mutex_init(pthread_mutex_t *mutex,
	const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
int __pthread_mutex_lock(pthread_mutex_t *mutex);
int __pthread_mutex_timedlock(pthread_mutex_t *mutex,
	const struct timespec *abs_timeout);

static int
mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr, int private)
{
	const struct pthread_mutex_attr *attr;
	struct pthread_mutex *pmutex;

	if (mutex_attr == NULL) {
		attr = &_pthread_mutexattr_default;
	} else {
		attr = *mutex_attr;
		if (attr->m_type < PTHREAD_MUTEX_ERRORCHECK ||
		    attr->m_type >= PTHREAD_MUTEX_TYPE_MAX)
			return (EINVAL);
		if (attr->m_protocol < PTHREAD_PRIO_NONE ||
		    attr->m_protocol > PTHREAD_PRIO_PROTECT)
			return (EINVAL);
	}

	if ((pmutex = (pthread_mutex_t)
		malloc(sizeof(struct pthread_mutex))) == NULL)
		return (ENOMEM);

	_thr_umtx_init(&pmutex->m_lock);
	pmutex->m_type = attr->m_type;
	pmutex->m_protocol = attr->m_protocol;
	TAILQ_INIT(&pmutex->m_queue);
	pmutex->m_owner = NULL;
	pmutex->m_flags = attr->m_flags | MUTEX_FLAGS_INITED;
	if (private)
		pmutex->m_flags |= MUTEX_FLAGS_PRIVATE;
	pmutex->m_count = 0;
	pmutex->m_refcount = 0;
	if (attr->m_protocol == PTHREAD_PRIO_PROTECT)
		pmutex->m_prio = attr->m_ceiling;
	else
		pmutex->m_prio = -1;
	pmutex->m_saved_prio = 0;
	MUTEX_INIT_LINK(pmutex);
	*mutex = pmutex;
	return (0);
}

static int
init_static(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 0);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

static int
init_static_private(struct pthread *thread, pthread_mutex_t *mutex)
{
	int ret;

	THR_LOCK_ACQUIRE(thread, &_mutex_static_lock);

	if (*mutex == NULL)
		ret = mutex_init(mutex, NULL, 1);
	else
		ret = 0;

	THR_LOCK_RELEASE(thread, &_mutex_static_lock);

	return (ret);
}

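/*
 * In this library a statically initialized mutex is simply a NULL
 * pthread_mutex_t pointer; the lock/trylock entry points below detect
 * that and call init_static() or init_static_private() (under
 * _mutex_static_lock) to allocate the real mutex on first use.  A
 * minimal caller-side sketch, assuming the static initializer leaves
 * the pointer NULL (example_lock and example() are hypothetical
 * application code, not part of this file):
 *
 *	static pthread_mutex_t example_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	void
 *	example(void)
 *	{
 *		pthread_mutex_lock(&example_lock);   // allocates on first use
 *		pthread_mutex_unlock(&example_lock);
 *	}
 */
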
int
_pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 1);
}

int
__pthread_mutex_init(pthread_mutex_t *mutex,
    const pthread_mutexattr_t *mutex_attr)
{
	return mutex_init(mutex, mutex_attr, 0);
}

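/*
 * mutex_init() validates m_type and m_protocol before allocating, so
 * applications select a mutex type through an attribute object.  A
 * short sketch using only standard pthread calls (hypothetical
 * application code, not part of this file):
 *
 *	pthread_mutexattr_t attr;
 *	pthread_mutex_t m;
 *
 *	pthread_mutexattr_init(&attr);
 *	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
 *	pthread_mutex_init(&m, &attr);	// resolves to __pthread_mutex_init()
 *	pthread_mutexattr_destroy(&attr);
 */
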
/*
 * Reset a mutex to its initial, unlocked and unowned state in place,
 * without reallocating it.
 */
int
_mutex_reinit(pthread_mutex_t *mutex)
{
	_thr_umtx_init(&(*mutex)->m_lock);
	TAILQ_INIT(&(*mutex)->m_queue);
	MUTEX_INIT_LINK(*mutex);
	(*mutex)->m_owner = NULL;
	(*mutex)->m_count = 0;
	(*mutex)->m_refcount = 0;
	(*mutex)->m_prio = 0;
	(*mutex)->m_saved_prio = 0;
	return (0);
}

/*
 * Force the lock word of every mutex on the forking thread's
 * owned-mutex queue back to the locked state, keeping the umtx-level
 * state consistent with ownership across fork().
 */
void
_mutex_fork(struct pthread *curthread)
{
	struct pthread_mutex *m;

	TAILQ_FOREACH(m, &curthread->mutexq, m_qe)
		m->m_lock = UMTX_LOCKED;
}

int
_pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	pthread_mutex_t m;
	int ret = 0;

	if (mutex == NULL)
		ret = EINVAL;
	else if (*mutex == NULL)
		ret = 0;
	else {
		/*
		 * Try to lock the mutex structure.  We only need to
		 * try once; if that fails, the mutex is in use.
		 */
		ret = THR_UMTX_TRYLOCK(curthread, &(*mutex)->m_lock);
		if (ret)
			return (ret);

		/*
		 * Check the mutex's other fields to see if it is in
		 * use.  This matters mostly for priority mutex types,
		 * or when condition variables reference the mutex.
		 */
		if (((*mutex)->m_owner != NULL) ||
		    (TAILQ_FIRST(&(*mutex)->m_queue) != NULL) ||
		    ((*mutex)->m_refcount != 0)) {
			THR_UMTX_UNLOCK(curthread, &(*mutex)->m_lock);
			ret = EBUSY;
		} else {
			/*
			 * Save a pointer to the mutex so it can be freed
			 * and set the caller's pointer to NULL:
			 */
			m = *mutex;
			*mutex = NULL;

			/* Unlock the mutex structure: */
			THR_UMTX_UNLOCK(curthread, &m->m_lock);

			/*
			 * Free the memory allocated for the mutex
			 * structure:
			 */
			MUTEX_ASSERT_NOT_OWNED(m);
			MUTEX_DESTROY(m);
		}
	}

	/* Return the completion status: */
	return (ret);
}

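/*
 * Because of the checks above, destroying a mutex that is locked,
 * has waiters, or is referenced by a condition variable fails with
 * EBUSY instead of freeing live state.  A caller-side sketch
 * (hypothetical application code; error handling elided):
 *
 *	pthread_mutex_lock(&m);
 *	pthread_mutex_destroy(&m);	// expected to fail with EBUSY
 *	pthread_mutex_unlock(&m);
 *	pthread_mutex_destroy(&m);	// expected to return 0
 */
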
static int
mutex_trylock_common(struct pthread *curthread, pthread_mutex_t *mutex)
{
	struct pthread_mutex *m;
	int ret;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq,
		    m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_trylock(m);
	} /* else {} */

	return (ret);
}

int
__pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread *curthread = tls_get_curthread();
	int ret;

	if (__predict_false(m == NULL))
		return (EINVAL);
	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

int
_pthread_mutex_trylock(pthread_mutex_t *m)
{
	struct pthread	*curthread = tls_get_curthread();
	int	ret = 0;

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking the mutex private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret != 0))
			return (ret);
	}
	return (mutex_trylock_common(curthread, m));
}

static int
mutex_lock_common(struct pthread *curthread, pthread_mutex_t *mutex,
	const struct timespec * abstime)
{
	struct  timespec ts, ts2;
	struct  pthread_mutex *m;
	int	ret = 0;

	m = *mutex;
	ret = THR_UMTX_TRYLOCK(curthread, &m->m_lock);
	if (ret == 0) {
		m->m_owner = curthread;
		/* Add to the list of owned mutexes: */
		MUTEX_ASSERT_NOT_OWNED(m);
		TAILQ_INSERT_TAIL(&curthread->mutexq,
		    m, m_qe);
	} else if (m->m_owner == curthread) {
		ret = mutex_self_lock(m, abstime);
	} else {
		if (abstime == NULL) {
			THR_UMTX_LOCK(curthread, &m->m_lock);
			ret = 0;
		} else if (__predict_false(
			abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
			abstime->tv_nsec >= 1000000000)) {
				ret = EINVAL;
		} else {
			clock_gettime(CLOCK_REALTIME, &ts);
			TIMESPEC_SUB(&ts2, abstime, &ts);
			ret = THR_UMTX_TIMEDLOCK(curthread,
				&m->m_lock, &ts2);
			/*
			 * A timed-out wait is not restarted if it
			 * was interrupted; it is not worth doing.
			 */
			if (ret == EINTR)
				ret = ETIMEDOUT;
		}
		if (ret == 0) {
			m->m_owner = curthread;
			/* Add to the list of owned mutexes: */
			MUTEX_ASSERT_NOT_OWNED(m);
			TAILQ_INSERT_TAIL(&curthread->mutexq,
			    m, m_qe);
		}
	}
	return (ret);
}

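/*
 * Note the timed path above converts the caller's absolute
 * CLOCK_REALTIME deadline into a relative timeout for the kernel
 * wait.  A caller-side sketch (hypothetical application code):
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_REALTIME, &deadline);
 *	deadline.tv_sec += 5;	// give up roughly five seconds from now
 *	if (pthread_mutex_timedlock(&m, &deadline) == ETIMEDOUT)
 *		...;		// lock not acquired within the deadline
 */
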
int
__pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
_pthread_mutex_lock(pthread_mutex_t *m)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, NULL));
}

int
__pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization:
	 */
	curthread = tls_get_curthread();
	if (__predict_false(*m == NULL)) {
		ret = init_static(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_timedlock(pthread_mutex_t *m,
	const struct timespec *abs_timeout)
{
	struct pthread *curthread;
	int	ret;

	if (__predict_false(m == NULL))
		return (EINVAL);

	curthread = tls_get_curthread();

	/*
	 * If the mutex is statically initialized, perform the dynamic
	 * initialization marking it private (delete safe):
	 */
	if (__predict_false(*m == NULL)) {
		ret = init_static_private(curthread, m);
		if (__predict_false(ret))
			return (ret);
	}
	return (mutex_lock_common(curthread, m, abs_timeout));
}

int
_pthread_mutex_unlock(pthread_mutex_t *m)
{
	if (__predict_false(m == NULL))
		return (EINVAL);
	return (mutex_unlock_common(m));
}

static int
mutex_self_trylock(pthread_mutex_t m)
{
	int	ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_NORMAL:
		ret = EBUSY;
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

static int
mutex_self_lock(pthread_mutex_t m, const struct timespec *abstime)
{
	struct timespec ts1, ts2;
	int ret;

	switch (m->m_type) {
	/* case PTHREAD_MUTEX_DEFAULT: */
	case PTHREAD_MUTEX_ERRORCHECK:
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			/*
			 * POSIX specifies that mutexes should return
			 * EDEADLK if a recursive lock is detected.
			 */
			ret = EDEADLK;
		}
		break;

	case PTHREAD_MUTEX_NORMAL:
		/*
		 * What SS2 defines as a 'normal' mutex.  Intentionally
		 * deadlock on attempts to get a lock you already own.
		 */
		ret = 0;
		if (abstime) {
			clock_gettime(CLOCK_REALTIME, &ts1);
			TIMESPEC_SUB(&ts2, abstime, &ts1);
			__sys_nanosleep(&ts2, NULL);
			ret = ETIMEDOUT;
		} else {
			ts1.tv_sec = 30;
			ts1.tv_nsec = 0;
			for (;;)
				__sys_nanosleep(&ts1, NULL);
		}
		break;

	case PTHREAD_MUTEX_RECURSIVE:
		/* Increment the lock count: */
		if (m->m_count + 1 > 0) {
			m->m_count++;
			ret = 0;
		} else
			ret = EAGAIN;
		break;

	default:
		/* Trap invalid mutex types: */
		ret = EINVAL;
	}

	return (ret);
}

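/*
 * The self-lock paths above are what make the mutex types observably
 * different when a thread relocks a mutex it already owns:
 *
 *	pthread_mutex_lock(&m);
 *	pthread_mutex_lock(&m);
 *	// ERRORCHECK: second lock returns EDEADLK immediately
 *	// NORMAL:     second lock deadlocks by design (sleeps forever)
 *	// RECURSIVE:  second lock returns 0; two unlocks release it
 */
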
static int
mutex_unlock_common(pthread_mutex_t *mutex)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	if (__predict_false(
		m->m_type == PTHREAD_MUTEX_RECURSIVE &&
		m->m_count > 0)) {
		m->m_count--;
	} else {
		/*
		 * Clear the count in case this is a recursive mutex.
		 */
		m->m_count = 0;
		m->m_owner = NULL;
		/* Remove the mutex from the thread's queue. */
		MUTEX_ASSERT_IS_OWNED(m);
		TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
		MUTEX_INIT_LINK(m);
		/*
		 * Hand off the mutex to the next waiting thread.
		 */
		THR_UMTX_UNLOCK(curthread, &m->m_lock);
	}
	return (0);
}

int
_mutex_cv_lock(pthread_mutex_t *m, int count)
{
	int	ret;

	if ((ret = _pthread_mutex_lock(m)) == 0) {
		(*m)->m_refcount--;
		(*m)->m_count += count;
	}
	return (ret);
}

int
_mutex_cv_unlock(pthread_mutex_t *mutex, int *count)
{
	struct pthread *curthread = tls_get_curthread();
	struct pthread_mutex *m;

	if (__predict_false(mutex == NULL))
		return (EINVAL);
	if (__predict_false((m = *mutex) == NULL))
		return (EINVAL);
	if (__predict_false(m->m_owner != curthread))
		return (EPERM);

	*count = m->m_count;
	m->m_count = 0;
	m->m_refcount++;
	m->m_owner = NULL;
	/* Remove the mutex from the thread's queue. */
	MUTEX_ASSERT_IS_OWNED(m);
	TAILQ_REMOVE(&curthread->mutexq, m, m_qe);
	MUTEX_INIT_LINK(m);
	THR_UMTX_UNLOCK(curthread, &m->m_lock);
	return (0);
}

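/*
 * _mutex_cv_unlock() and _mutex_cv_lock() are the condition-variable
 * side of the mutex protocol: the wait path saves the recursion count
 * and bumps m_refcount (so _pthread_mutex_destroy() reports EBUSY
 * while waiters still reference the mutex), and the wakeup path
 * re-locks and restores the count.  A rough sketch of the caller,
 * with the condition-variable internals elided:
 *
 *	int count;
 *
 *	_mutex_cv_unlock(mutex, &count);  // release; remember count
 *	// ... sleep until the condition variable is signaled ...
 *	_mutex_cv_lock(mutex, count);	  // re-lock; restore count
 */
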
void
_mutex_unlock_private(pthread_t pthread)
{
	struct pthread_mutex	*m, *m_next;

	for (m = TAILQ_FIRST(&pthread->mutexq); m != NULL; m = m_next) {
		m_next = TAILQ_NEXT(m, m_qe);
		if ((m->m_flags & MUTEX_FLAGS_PRIVATE) != 0)
			_pthread_mutex_unlock(&m);
	}
}

__strong_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_lock, pthread_mutex_lock);
__strong_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_trylock, pthread_mutex_trylock);

/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
__strong_reference(_pthread_mutex_destroy, pthread_mutex_destroy);
__strong_reference(_pthread_mutex_unlock, pthread_mutex_unlock);