/*
 * Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
 * Copyright (c) 1998 Alex Nash
 * Copyright (c) 2006 David Xu <yfxu@corp.netease.com>.
 * Copyright (c) 2013 Larisa Grigore <larisagrigore@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by John Birrell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
3482657471SMarkus Pfeiffer * 3582657471SMarkus Pfeiffer * $DragonFly: src/lib/libthread_xu/thread/thr_mutex.c,v 1.15 2008/05/09 16:03:27 dillon Exp $ 3682657471SMarkus Pfeiffer * $FreeBSD: src/lib/libpthread/thread/thr_rwlock.c,v 1.14 2004/01/08 15:37:09 deischen Exp $ 3782657471SMarkus Pfeiffer * $DragonFly: src/lib/libthread_xu/thread/thr_rwlock.c,v 1.7 2006/04/06 13:03:09 davidxu Exp $ 3882657471SMarkus Pfeiffer */ 3982657471SMarkus Pfeiffer 4082657471SMarkus Pfeiffer #include <machine/atomic.h> 4182657471SMarkus Pfeiffer #include <machine/tls.h> 4282657471SMarkus Pfeiffer #include <errno.h> 4382657471SMarkus Pfeiffer 4482657471SMarkus Pfeiffer #include "sysvipc_utils.h" 4582657471SMarkus Pfeiffer #include "sysvipc_lock.h" 4682657471SMarkus Pfeiffer #include "sysvipc_lock_generic.h" 4782657471SMarkus Pfeiffer 4882657471SMarkus Pfeiffer #include <limits.h> 4982657471SMarkus Pfeiffer #include <stdio.h> 5082657471SMarkus Pfeiffer #include <unistd.h> 5182657471SMarkus Pfeiffer 5282657471SMarkus Pfeiffer #define MAX_READ_LOCKS (INT_MAX - 1) 5382657471SMarkus Pfeiffer 5482657471SMarkus Pfeiffer static int rdlock_count; 5582657471SMarkus Pfeiffer 5682657471SMarkus Pfeiffer int 5782657471SMarkus Pfeiffer sysv_mutex_init(struct sysv_mutex *mutex) { 5882657471SMarkus Pfeiffer if(mutex == NULL) 5982657471SMarkus Pfeiffer return (EINVAL); 6082657471SMarkus Pfeiffer mutex->_mutex_static_lock = 0; 6182657471SMarkus Pfeiffer mutex->pid_owner = -1; 6282657471SMarkus Pfeiffer mutex->tid_owner = -1; 6382657471SMarkus Pfeiffer return (0); 6482657471SMarkus Pfeiffer } 6582657471SMarkus Pfeiffer 6682657471SMarkus Pfeiffer int 6782657471SMarkus Pfeiffer sysv_mutex_lock(struct sysv_mutex *mutex) 6882657471SMarkus Pfeiffer { 6982657471SMarkus Pfeiffer if (mutex->pid_owner == getpid() && 7082657471SMarkus Pfeiffer mutex->tid_owner == lwp_gettid()) { 7182657471SMarkus Pfeiffer sysv_print_err("deadlock: mutex aleady acquired by the thread\n"); 7282657471SMarkus Pfeiffer return (EDEADLK); 
7382657471SMarkus Pfeiffer } 7482657471SMarkus Pfeiffer _sysv_umtx_lock(&mutex->_mutex_static_lock); 7582657471SMarkus Pfeiffer mutex->pid_owner = getpid(); 7682657471SMarkus Pfeiffer mutex->tid_owner = lwp_gettid(); 7782657471SMarkus Pfeiffer return (0); 7882657471SMarkus Pfeiffer } 7982657471SMarkus Pfeiffer 8082657471SMarkus Pfeiffer int 8182657471SMarkus Pfeiffer sysv_mutex_unlock(struct sysv_mutex *mutex) 8282657471SMarkus Pfeiffer { 8382657471SMarkus Pfeiffer if (mutex->pid_owner != getpid() || 8482657471SMarkus Pfeiffer mutex->tid_owner != lwp_gettid()) { 8582657471SMarkus Pfeiffer sysv_print_err("eperm try unlock a mutex that is not acquired\n"); 8682657471SMarkus Pfeiffer return (EPERM); 8782657471SMarkus Pfeiffer } 8882657471SMarkus Pfeiffer 8982657471SMarkus Pfeiffer mutex->tid_owner = -1; 9082657471SMarkus Pfeiffer mutex->pid_owner = -1; 9182657471SMarkus Pfeiffer _sysv_umtx_unlock(&mutex->_mutex_static_lock); 9282657471SMarkus Pfeiffer return (0); 9382657471SMarkus Pfeiffer } 9482657471SMarkus Pfeiffer 9582657471SMarkus Pfeiffer static int 9682657471SMarkus Pfeiffer sysv_cond_wait(int *val, struct sysv_mutex *mutex) { 9782657471SMarkus Pfeiffer sysv_mutex_unlock(mutex); 9882657471SMarkus Pfeiffer 9982657471SMarkus Pfeiffer /* I use SYSV_TIMEOUT to avoid lossing a wakeup 10082657471SMarkus Pfeiffer * sent before going to sleep and remain blocked. 
10182657471SMarkus Pfeiffer */ 10282657471SMarkus Pfeiffer umtx_sleep(val, *val, SYSV_TIMEOUT); 10382657471SMarkus Pfeiffer return (sysv_mutex_lock(mutex)); 10482657471SMarkus Pfeiffer } 10582657471SMarkus Pfeiffer 10682657471SMarkus Pfeiffer static int 10782657471SMarkus Pfeiffer sysv_cond_signal(int *val) { 10882657471SMarkus Pfeiffer return (umtx_wakeup(val, 0)); 10982657471SMarkus Pfeiffer } 11082657471SMarkus Pfeiffer 11182657471SMarkus Pfeiffer int 11282657471SMarkus Pfeiffer sysv_rwlock_init(struct sysv_rwlock *rwlock) 11382657471SMarkus Pfeiffer { 114*07d2dac6SAntonio Huete Jimenez int ret = 0; 11582657471SMarkus Pfeiffer 11682657471SMarkus Pfeiffer if (rwlock == NULL) 11782657471SMarkus Pfeiffer return (EINVAL); 11882657471SMarkus Pfeiffer 11982657471SMarkus Pfeiffer /* Initialize the lock. */ 12082657471SMarkus Pfeiffer sysv_mutex_init(&rwlock->lock); 12182657471SMarkus Pfeiffer rwlock->state = 0; 12282657471SMarkus Pfeiffer rwlock->blocked_writers = 0; 12382657471SMarkus Pfeiffer 12482657471SMarkus Pfeiffer return (ret); 12582657471SMarkus Pfeiffer } 12682657471SMarkus Pfeiffer 12782657471SMarkus Pfeiffer int 12882657471SMarkus Pfeiffer sysv_rwlock_unlock (struct sysv_rwlock *rwlock) 12982657471SMarkus Pfeiffer { 13082657471SMarkus Pfeiffer int ret; 13182657471SMarkus Pfeiffer 13282657471SMarkus Pfeiffer if (rwlock == NULL) 13382657471SMarkus Pfeiffer return (EINVAL); 13482657471SMarkus Pfeiffer 13582657471SMarkus Pfeiffer /* Grab the monitor lock. 
*/ 13682657471SMarkus Pfeiffer if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0) 13782657471SMarkus Pfeiffer return (ret); 13882657471SMarkus Pfeiffer 13982657471SMarkus Pfeiffer if (rwlock->state > 0) { 14082657471SMarkus Pfeiffer rdlock_count--; 14182657471SMarkus Pfeiffer rwlock->state--; 14282657471SMarkus Pfeiffer if (rwlock->state == 0 && rwlock->blocked_writers) { 14382657471SMarkus Pfeiffer ret = sysv_cond_signal(&rwlock->write_signal); 14482657471SMarkus Pfeiffer } 14582657471SMarkus Pfeiffer } else if (rwlock->state < 0) { 14682657471SMarkus Pfeiffer rwlock->state = 0; 14782657471SMarkus Pfeiffer 14882657471SMarkus Pfeiffer if (rwlock->blocked_writers) { 14982657471SMarkus Pfeiffer ret = sysv_cond_signal(&rwlock->write_signal); 15082657471SMarkus Pfeiffer } 15182657471SMarkus Pfeiffer else { 15282657471SMarkus Pfeiffer ret = sysv_cond_signal(&rwlock->read_signal); 15382657471SMarkus Pfeiffer } 15482657471SMarkus Pfeiffer } else 15582657471SMarkus Pfeiffer ret = EINVAL; 15682657471SMarkus Pfeiffer 15782657471SMarkus Pfeiffer sysv_mutex_unlock(&rwlock->lock); 15882657471SMarkus Pfeiffer 15982657471SMarkus Pfeiffer return (ret); 16082657471SMarkus Pfeiffer } 16182657471SMarkus Pfeiffer 16282657471SMarkus Pfeiffer int 16382657471SMarkus Pfeiffer sysv_rwlock_wrlock (struct sysv_rwlock *rwlock) 16482657471SMarkus Pfeiffer { 16582657471SMarkus Pfeiffer int ret; 16682657471SMarkus Pfeiffer 16782657471SMarkus Pfeiffer if (rwlock == NULL) 16882657471SMarkus Pfeiffer return (EINVAL); 16982657471SMarkus Pfeiffer 17082657471SMarkus Pfeiffer /* Grab the monitor lock. 
*/ 17182657471SMarkus Pfeiffer if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0) 17282657471SMarkus Pfeiffer return (ret); 17382657471SMarkus Pfeiffer 17482657471SMarkus Pfeiffer while (rwlock->state != 0) { 17582657471SMarkus Pfeiffer rwlock->blocked_writers++; 17682657471SMarkus Pfeiffer 17782657471SMarkus Pfeiffer ret = sysv_cond_wait(&rwlock->write_signal, &rwlock->lock); 17882657471SMarkus Pfeiffer if (ret != 0) { 17982657471SMarkus Pfeiffer rwlock->blocked_writers--; 18082657471SMarkus Pfeiffer /* No unlock is required because only the lock 18182657471SMarkus Pfeiffer * operation can return error. 18282657471SMarkus Pfeiffer */ 18382657471SMarkus Pfeiffer //sysv_mutex_unlock(&rwlock->lock); 18482657471SMarkus Pfeiffer return (ret); 18582657471SMarkus Pfeiffer } 18682657471SMarkus Pfeiffer 18782657471SMarkus Pfeiffer rwlock->blocked_writers--; 18882657471SMarkus Pfeiffer } 18982657471SMarkus Pfeiffer 19082657471SMarkus Pfeiffer /* Indicate that we are locked for writing. */ 19182657471SMarkus Pfeiffer rwlock->state = -1; 19282657471SMarkus Pfeiffer 19382657471SMarkus Pfeiffer sysv_mutex_unlock(&rwlock->lock); 19482657471SMarkus Pfeiffer 19582657471SMarkus Pfeiffer return (ret); 19682657471SMarkus Pfeiffer } 19782657471SMarkus Pfeiffer 19882657471SMarkus Pfeiffer int 19982657471SMarkus Pfeiffer sysv_rwlock_rdlock(struct sysv_rwlock *rwlock) 20082657471SMarkus Pfeiffer { 20182657471SMarkus Pfeiffer int ret; 20282657471SMarkus Pfeiffer 20382657471SMarkus Pfeiffer // sysv_print("try get rd lock\n"); 20482657471SMarkus Pfeiffer if (rwlock == NULL) 20582657471SMarkus Pfeiffer return (EINVAL); 20682657471SMarkus Pfeiffer 20782657471SMarkus Pfeiffer /* Grab the monitor lock. */ 20882657471SMarkus Pfeiffer if ((ret = sysv_mutex_lock(&rwlock->lock)) != 0) 20982657471SMarkus Pfeiffer return (ret); 21082657471SMarkus Pfeiffer 21182657471SMarkus Pfeiffer /* Check the lock count. 
*/ 21282657471SMarkus Pfeiffer if (rwlock->state == MAX_READ_LOCKS) { 21382657471SMarkus Pfeiffer sysv_mutex_unlock(&rwlock->lock); 21482657471SMarkus Pfeiffer return (EAGAIN); 21582657471SMarkus Pfeiffer } 21682657471SMarkus Pfeiffer 21782657471SMarkus Pfeiffer if ((rdlock_count > 0) && (rwlock->state > 0)) { 21882657471SMarkus Pfeiffer /* 21982657471SMarkus Pfeiffer * Taken from the pthread implementation with only 22082657471SMarkus Pfeiffer * one change; rdlock_count is per process not per 22182657471SMarkus Pfeiffer * thread; 22282657471SMarkus Pfeiffer * Original comment: 22382657471SMarkus Pfeiffer * To avoid having to track all the rdlocks held by 22482657471SMarkus Pfeiffer * a thread or all of the threads that hold a rdlock, 22582657471SMarkus Pfeiffer * we keep a simple count of all the rdlocks held by 22682657471SMarkus Pfeiffer * a thread. If a thread holds any rdlocks it is 22782657471SMarkus Pfeiffer * possible that it is attempting to take a recursive 22882657471SMarkus Pfeiffer * rdlock. If there are blocked writers and precedence 22982657471SMarkus Pfeiffer * is given to them, then that would result in the thread 23082657471SMarkus Pfeiffer * deadlocking. So allowing a thread to take the rdlock 23182657471SMarkus Pfeiffer * when it already has one or more rdlocks avoids the 23282657471SMarkus Pfeiffer * deadlock. I hope the reader can follow that logic ;-) 23382657471SMarkus Pfeiffer */ 23482657471SMarkus Pfeiffer ; /* nothing needed */ 23582657471SMarkus Pfeiffer } else { 23682657471SMarkus Pfeiffer /* Give writers priority over readers. */ 23782657471SMarkus Pfeiffer while (rwlock->blocked_writers || rwlock->state < 0) { 23882657471SMarkus Pfeiffer ret = sysv_cond_wait(&rwlock->read_signal, 23982657471SMarkus Pfeiffer &rwlock->lock); 24082657471SMarkus Pfeiffer if (ret != 0) { 24182657471SMarkus Pfeiffer /* No unlock necessary because only lock 24282657471SMarkus Pfeiffer * operation can return error. 
24382657471SMarkus Pfeiffer */ 24482657471SMarkus Pfeiffer //sysv_mutex_unlock(&rwlock->lock); 24582657471SMarkus Pfeiffer return (ret); 24682657471SMarkus Pfeiffer } 24782657471SMarkus Pfeiffer } 24882657471SMarkus Pfeiffer } 24982657471SMarkus Pfeiffer 25082657471SMarkus Pfeiffer rdlock_count++; 25182657471SMarkus Pfeiffer rwlock->state++; /* Indicate we are locked for reading. */ 25282657471SMarkus Pfeiffer 25382657471SMarkus Pfeiffer /* 25482657471SMarkus Pfeiffer * Something is really wrong if this call fails. Returning 25582657471SMarkus Pfeiffer * error won't do because we've already obtained the read 25682657471SMarkus Pfeiffer * lock. Decrementing 'state' is no good because we probably 25782657471SMarkus Pfeiffer * don't have the monitor lock. 25882657471SMarkus Pfeiffer */ 25982657471SMarkus Pfeiffer sysv_mutex_unlock(&rwlock->lock); 26082657471SMarkus Pfeiffer 26182657471SMarkus Pfeiffer return (ret); 26282657471SMarkus Pfeiffer } 263