/*-
 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
 * Copyright (c) 2005 Matthew Dillon <dillon@backplane.com>
 *
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <assert.h>
#include <errno.h>
#include <unistd.h>
#include <sys/time.h>

#include "sysvipc_lock_generic.h"

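/*
 * The lock word has three states, as used by the routines below:
 * 0 (unlocked), 1 (locked, no waiters) and 2 (locked, contested).
 */
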
/*
 * Acquire a contested lock: mark the lock word contested, sleep while
 * it stays that way, and retry until the lock is ours.
 */
int
__sysv_umtx_lock(volatile umtx_t *mtx, int timo)
{
	int v, errval, ret = 0;

	/* contested */
	do {
		v = *mtx;
		/*
		 * Mark the lock contested (1 -> 2) if it is not already,
		 * then sleep for as long as the lock word remains 2.
		 * With a timo of 0 the return value is ignored and the
		 * loop simply retries.
		 */
		if (v == 2 || atomic_cmpset_acq_int(mtx, 1, 2)) {
			if (timo == 0)
				umtx_sleep(mtx, 2, timo);
			else if ((errval = umtx_sleep(mtx, 2, timo)) > 0) {
				if (errval == EAGAIN) {
					/*
					 * The wait timed out.  Make one
					 * final attempt to grab the lock
					 * before reporting ETIMEDOUT.
					 */
					if (atomic_cmpset_acq_int(mtx, 0, 2))
						ret = 0;
					else
						ret = ETIMEDOUT;
					break;
				}
			}
		}
		/*
		 * Acquire in the contested state (0 -> 2) so that the
		 * eventual unlock also wakes up any other waiters.
		 */
	} while (!atomic_cmpset_acq_int(mtx, 0, 2));

	return (ret);
}

void
__sysv_umtx_unlock(volatile umtx_t *mtx)
{
	int v;

	for (;;) {
		v = *mtx;
		/* release the lock by decrementing it (2 -> 1 or 1 -> 0) */
		if (atomic_cmpset_acq_int(mtx, v, v - 1)) {
			if (v != 1) {
				/*
				 * The lock was contested: force it all
				 * the way to 0 and wake up one waiter.
				 */
				*mtx = 0;
				umtx_wakeup(mtx, 1);
			}
			break;
		}
	}
}
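
/*
 * Usage sketch (illustrative only): the routines above are the slow
 * paths of a futex-style mutex, so a caller would normally wrap them
 * with an uncontested fast path.  The wrapper names below are
 * hypothetical and not part of this file; only the 0 -> 1 fast-path
 * convention is implied by the state encoding above.
 */
#if 0
static inline int
example_mtx_lock(volatile umtx_t *mtx, int timo)
{
	/* fast path: grab an uncontested lock (0 -> 1) */
	if (atomic_cmpset_acq_int(mtx, 0, 1))
		return (0);
	/* slow path: sleep until acquired, or until timo expires */
	return (__sysv_umtx_lock(mtx, timo));
}

static inline void
example_mtx_unlock(volatile umtx_t *mtx)
{
	__sysv_umtx_unlock(mtx);
}
#endif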