/*	$NetBSD: pthread_mutex.c,v 1.38 2007/11/19 15:14:13 ad Exp $	*/

/*-
 * Copyright (c) 2001, 2003, 2006, 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, by Jason R. Thorpe, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__RCSID("$NetBSD: pthread_mutex.c,v 1.38 2007/11/19 15:14:13 ad Exp $");

#include <errno.h>
#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <sys/types.h>
#include <sys/lock.h>

#include "pthread.h"
#include "pthread_int.h"

#ifndef	PTHREAD__HAVE_ATOMIC

static int pthread_mutex_lock_slow(pthread_t, pthread_mutex_t *);

__strong_alias(__libc_mutex_init,pthread_mutex_init)
__strong_alias(__libc_mutex_lock,pthread_mutex_lock)
__strong_alias(__libc_mutex_trylock,pthread_mutex_trylock)
__strong_alias(__libc_mutex_unlock,pthread_mutex_unlock)
__strong_alias(__libc_mutex_destroy,pthread_mutex_destroy)

__strong_alias(__libc_mutexattr_init,pthread_mutexattr_init)
__strong_alias(__libc_mutexattr_destroy,pthread_mutexattr_destroy)
__strong_alias(__libc_mutexattr_settype,pthread_mutexattr_settype)

__strong_alias(__libc_thr_once,pthread_once)

struct mutex_private {
	int	type;
	int	recursecount;
};

static const struct mutex_private mutex_private_default = {
	PTHREAD_MUTEX_DEFAULT,
	0,
};

struct mutexattr_private {
	int	type;
};

static const struct mutexattr_private mutexattr_private_default = {
	PTHREAD_MUTEX_DEFAULT,
};

int
pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;
	struct mutex_private *mp;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    (attr == NULL) || (attr->ptma_magic == _PT_MUTEXATTR_MAGIC));

	if (attr != NULL && (map = attr->ptma_private) != NULL &&
	    memcmp(map, &mutexattr_private_default, sizeof(*map)) != 0) {
		mp = malloc(sizeof(*mp));
		if (mp == NULL)
			return ENOMEM;

		mp->type = map->type;
		mp->recursecount = 0;
	} else {
		/* LINTED cast away const */
		mp = (struct mutex_private *) &mutex_private_default;
	}

	mutex->ptm_magic = _PT_MUTEX_MAGIC;
	mutex->ptm_owner = NULL;
	pthread_lockinit(&mutex->ptm_lock);
	pthread_lockinit(&mutex->ptm_interlock);
	PTQ_INIT(&mutex->ptm_blocked);
	mutex->ptm_private = mp;

	return 0;
}


int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);
	pthread__error(EBUSY, "Destroying locked mutex",
	    __SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock));

	mutex->ptm_magic = _PT_MUTEX_DEAD;
	if (mutex->ptm_private != NULL &&
	    mutex->ptm_private != (const void *)&mutex_private_default)
		free(mutex->ptm_private);

	return 0;
}
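
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): typical application use of pthread_mutex_init() and
 * pthread_mutex_destroy().  With a NULL attribute pointer the mutex
 * shares the read-only mutex_private_default above, so nothing is
 * malloc'd and pthread_mutex_destroy() has nothing to free.
 */
#if 0
#include <pthread.h>

static int
init_use_destroy(void)
{
	pthread_mutex_t mtx;
	int error;

	if ((error = pthread_mutex_init(&mtx, NULL)) != 0)
		return error;

	pthread_mutex_lock(&mtx);
	/* ... critical section ... */
	pthread_mutex_unlock(&mtx);

	/* The mutex must be unlocked here; destroying a held mutex is an error. */
	return pthread_mutex_destroy(&mtx);
}
#endif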


/*
 * Note regarding memory visibility: Pthreads has rules about memory
 * visibility and mutexes. Very roughly: Memory a thread can see when
 * it unlocks a mutex can be seen by another thread that locks the
 * same mutex.
 *
 * A memory barrier after a lock and before an unlock will provide
 * this behavior. This code relies on pthread__spintrylock() to issue
 * a barrier after obtaining a lock, and on pthread__spinunlock() to
 * issue a barrier before releasing a lock.
 */
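
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): the visibility rule in practice.  If producer() unlocks the
 * mutex before consumer() locks it, the store to shared_value is
 * guaranteed to be visible in consumer().
 */
#if 0
#include <pthread.h>

static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
static int shared_value;

static void
producer(void)
{
	pthread_mutex_lock(&mtx);
	shared_value = 42;		/* store made while holding the mutex */
	pthread_mutex_unlock(&mtx);	/* barrier issued before release */
}

static int
consumer(void)
{
	int v;

	pthread_mutex_lock(&mtx);	/* barrier issued after acquire */
	v = shared_value;		/* sees 42 once producer() has unlocked */
	pthread_mutex_unlock(&mtx);
	return v;
}
#endif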

int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
	pthread_t self;
	int error;

	self = pthread__self();

	/*
	 * Note that if we get the lock, we don't have to deal with any
	 * non-default lock type handling.
	 */
	if (__predict_false(pthread__spintrylock(self, &mutex->ptm_lock) == 0)) {
		error = pthread_mutex_lock_slow(self, mutex);
		if (error)
			return error;
	}

	/*
	 * We have the lock!
	 */
	mutex->ptm_owner = self;

	return 0;
}


static int
pthread_mutex_lock_slow(pthread_t self, pthread_mutex_t *mutex)
{
	extern int pthread__started;
	struct mutex_private *mp;
	sigset_t ss;
	int count;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	for (;;) {
		/* Spin for a while. */
		count = pthread__nspins;
		while (__SIMPLELOCK_LOCKED_P(&mutex->ptm_lock) && --count > 0)
			pthread__smt_pause();
		if (count > 0) {
			if (pthread__spintrylock(self, &mutex->ptm_lock) != 0)
				break;
			continue;
		}

		/* Okay, didn't look free. Get the interlock... */
		pthread__spinlock(self, &mutex->ptm_interlock);

		/*
		 * The mutex_unlock routine will get the interlock
		 * before looking at the list of sleepers, so if the
		 * lock is held we can safely put ourselves on the
		 * sleep queue. If it's not held, we can try taking it
		 * again.
		 */
		PTQ_INSERT_HEAD(&mutex->ptm_blocked, self, pt_sleep);
		if (__SIMPLELOCK_UNLOCKED_P(&mutex->ptm_lock)) {
			PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
			pthread__spinunlock(self, &mutex->ptm_interlock);
			continue;
		}

		mp = mutex->ptm_private;
		if (mutex->ptm_owner == self && mp != NULL) {
			switch (mp->type) {
			case PTHREAD_MUTEX_ERRORCHECK:
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				return EDEADLK;

			case PTHREAD_MUTEX_RECURSIVE:
				/*
				 * It's safe to do this without
				 * holding the interlock, because
				 * we only modify it if we know we
				 * own the mutex.
				 */
				PTQ_REMOVE(&mutex->ptm_blocked, self, pt_sleep);
				pthread__spinunlock(self, &mutex->ptm_interlock);
				if (mp->recursecount == INT_MAX)
					return EAGAIN;
				mp->recursecount++;
				return 0;
			}
		}

		if (pthread__started == 0) {
			/* The spec says we must deadlock, so... */
			pthread__assert(mp->type == PTHREAD_MUTEX_NORMAL);
			(void) sigprocmask(SIG_SETMASK, NULL, &ss);
			for (;;) {
				sigsuspend(&ss);
			}
			/*NOTREACHED*/
		}

		/*
		 * Locking a mutex is not a cancellation
		 * point, so we don't need to do the
		 * test-cancellation dance. We may get woken
		 * up spuriously by pthread_cancel or signals,
		 * but it's okay since we're just going to
		 * retry.
		 */
		self->pt_sleeponq = 1;
		self->pt_sleepobj = &mutex->ptm_blocked;
		pthread__spinunlock(self, &mutex->ptm_interlock);
		(void)pthread__park(self, &mutex->ptm_interlock,
		    &mutex->ptm_blocked, NULL, 0, &mutex->ptm_blocked);
	}

	return 0;
}
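
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): what the slow path above does when a thread relocks a mutex
 * it already owns.  An error-checking mutex returns EDEADLK, a
 * recursive mutex bumps its recursion count, and a normal/default
 * mutex deadlocks as the standard requires.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

static void
relock_demo(void)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t mtx;
	int error;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&mtx, &attr);
	pthread_mutexattr_destroy(&attr);

	pthread_mutex_lock(&mtx);
	error = pthread_mutex_lock(&mtx);	/* second lock by the owner */
	printf("relock of errorcheck mutex: %d (EDEADLK)\n", error);

	pthread_mutex_unlock(&mtx);
	pthread_mutex_destroy(&mtx);
}
#endif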


int
pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	self = pthread__self();

	if (pthread__spintrylock(self, &mutex->ptm_lock) == 0) {
		/*
		 * These tests can be performed without holding the
		 * interlock because these fields are only modified
		 * if we know we own the mutex.
		 */
		mp = mutex->ptm_private;
		if (mp != NULL && mp->type == PTHREAD_MUTEX_RECURSIVE &&
		    mutex->ptm_owner == self) {
			if (mp->recursecount == INT_MAX)
				return EAGAIN;
			mp->recursecount++;
			return 0;
		}

		return EBUSY;
	}

	mutex->ptm_owner = self;

	return 0;
}
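
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): pthread_mutex_trylock() never blocks, so a caller must be
 * prepared for EBUSY and either fall back to other work or fall back
 * to a blocking lock, as here.
 */
#if 0
#include <pthread.h>
#include <errno.h>

static int
try_then_block(pthread_mutex_t *mtx)
{
	int error;

	error = pthread_mutex_trylock(mtx);
	if (error == EBUSY) {
		/* Someone else holds it; do the blocking acquire instead. */
		error = pthread_mutex_lock(mtx);
	}
	return error;
}
#endif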


int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	struct mutex_private *mp;
	pthread_t self;
	int weown;

	pthread__error(EINVAL, "Invalid mutex",
	    mutex->ptm_magic == _PT_MUTEX_MAGIC);

	/*
	 * These tests can be performed without holding the
	 * interlock because these fields are only modified
	 * if we know we own the mutex.
	 */
	self = pthread__self();
	weown = (mutex->ptm_owner == self);
	mp = mutex->ptm_private;

	if (mp == NULL) {
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	} else if (mp->type == PTHREAD_MUTEX_RECURSIVE) {
		if (!weown)
			return EPERM;
		if (mp->recursecount != 0) {
			mp->recursecount--;
			return 0;
		}
	} else if (mp->type == PTHREAD_MUTEX_ERRORCHECK) {
		if (!weown)
			return EPERM;
		if (__predict_false(!weown)) {
			pthread__error(EPERM, "Unlocking unlocked mutex",
			    (mutex->ptm_owner != 0));
			pthread__error(EPERM,
			    "Unlocking mutex owned by another thread", weown);
		}
	}

	mutex->ptm_owner = NULL;
	pthread__spinunlock(self, &mutex->ptm_lock);

	/*
	 * Do a double-checked locking dance to see if there are any
	 * waiters.  If we don't see any waiters, we can exit, because
	 * we've already released the lock. If we do see waiters, they
	 * were probably waiting on us... there's a slight chance that
	 * they are waiting on a different thread's ownership of the
	 * lock that happened between the unlock above and this
	 * examination of the queue; if so, no harm is done, as the
	 * waiter will loop and see that the mutex is still locked.
	 */
	pthread__spinlock(self, &mutex->ptm_interlock);
	pthread__unpark_all(self, &mutex->ptm_interlock, &mutex->ptm_blocked);
	return 0;
}
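
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): unlock bookkeeping for a recursive mutex.  Each unlock by the
 * owner first drains the recursion count; only the final unlock
 * releases the lock word and wakes any waiters, and an unlock by a
 * non-owner fails with EPERM.
 */
#if 0
#include <pthread.h>

static void
recursive_unlock_demo(pthread_mutex_t *rmtx)
{
	/* Assumes rmtx was created with type PTHREAD_MUTEX_RECURSIVE. */
	pthread_mutex_lock(rmtx);	/* owner, recursecount == 0 */
	pthread_mutex_lock(rmtx);	/* recursecount == 1 */

	pthread_mutex_unlock(rmtx);	/* recursecount back to 0, still held */
	pthread_mutex_unlock(rmtx);	/* lock released, waiters woken */
}
#endif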

int
pthread_mutexattr_init(pthread_mutexattr_t *attr)
{
	struct mutexattr_private *map;

	map = malloc(sizeof(*map));
	if (map == NULL)
		return ENOMEM;

	*map = mutexattr_private_default;

	attr->ptma_magic = _PT_MUTEXATTR_MAGIC;
	attr->ptma_private = map;

	return 0;
}


int
pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
{

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	attr->ptma_magic = _PT_MUTEXATTR_DEAD;
	if (attr->ptma_private != NULL)
		free(attr->ptma_private);

	return 0;
}


int
pthread_mutexattr_gettype(const pthread_mutexattr_t *attr, int *typep)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	*typep = map->type;

	return 0;
}


int
pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
	struct mutexattr_private *map;

	pthread__error(EINVAL, "Invalid mutex attribute",
	    attr->ptma_magic == _PT_MUTEXATTR_MAGIC);

	map = attr->ptma_private;

	switch (type) {
	case PTHREAD_MUTEX_NORMAL:
	case PTHREAD_MUTEX_ERRORCHECK:
	case PTHREAD_MUTEX_RECURSIVE:
		map->type = type;
		break;

	default:
		return EINVAL;
	}

	return 0;
}
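
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): pthread_mutexattr_settype() accepts only the three standard
 * types and rejects anything else with EINVAL, while
 * pthread_mutexattr_gettype() reads the stored value back.
 */
#if 0
#include <pthread.h>
#include <errno.h>
#include <assert.h>

static void
mutexattr_type_demo(void)
{
	pthread_mutexattr_t attr;
	int type;

	pthread_mutexattr_init(&attr);
	assert(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK) == 0);
	assert(pthread_mutexattr_gettype(&attr, &type) == 0);
	assert(type == PTHREAD_MUTEX_ERRORCHECK);

	/* An unknown type is rejected and the attribute is left unchanged. */
	assert(pthread_mutexattr_settype(&attr, 12345) == EINVAL);

	pthread_mutexattr_destroy(&attr);
}
#endif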


static void
once_cleanup(void *closure)
{

	pthread_mutex_unlock((pthread_mutex_t *)closure);
}


int
pthread_once(pthread_once_t *once_control, void (*routine)(void))
{

	if (once_control->pto_done == 0) {
		pthread_mutex_lock(&once_control->pto_mutex);
		pthread_cleanup_push(&once_cleanup, &once_control->pto_mutex);
		if (once_control->pto_done == 0) {
			routine();
			once_control->pto_done = 1;
		}
		pthread_cleanup_pop(1);
	}

	return 0;
}
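
/*
 * Example (illustrative sketch, compiled out below; not part of this
 * file): typical pthread_once() use for one-time initialization.  The
 * init routine runs exactly once no matter how many threads race here,
 * and the cleanup handler above keeps pto_mutex from being left locked
 * if the caller is cancelled inside the init routine.
 */
#if 0
#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;
static int global_table_ready;

static void
init_global_table(void)
{
	global_table_ready = 1;		/* runs in exactly one thread */
}

static void
use_global_table(void)
{
	pthread_once(&once, init_global_table);
	/* global_table_ready is now set for every caller. */
}
#endif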

int
pthread__mutex_deferwake(pthread_t thread, pthread_mutex_t *mutex)
{

	return mutex->ptm_owner == thread;
}

#endif	/* !PTHREAD__HAVE_ATOMIC */