/*	$OpenBSD: kern_rwlock.c,v 1.17 2011/07/05 03:58:22 weingart Exp $	*/

/*
 * Copyright (c) 2002, 2003 Artur Grabowski <art@openbsd.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL  DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/limits.h>

#include <machine/lock.h>

/* XXX - temporary measure until proc0 is properly aligned */
#define RW_PROC(p) (((long)p) & ~RWLOCK_MASK)
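
/*
 * Editorial note (not part of the original file): as the code below
 * implies, the rwl_owner word multiplexes the flag bits from
 * <sys/rwlock.h> (RWLOCK_WAIT, RWLOCK_WRWANT, RWLOCK_WRLOCK, covered by
 * RWLOCK_MASK) with either the owning proc pointer (write-locked) or a
 * reader count scaled by RWLOCK_READ_INCR (read-locked).  RW_PROC()
 * strips the flag bits so a proc pointer can be stored in, and recovered
 * from, that word.
 */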

/*
 * Magic wand for lock operations. Every operation checks if certain
 * flags are set and if they aren't, it increments the lock with some
 * value (that might need some computing in a few cases). If the operation
 * fails, we need to set certain flags while waiting for the lock.
 *
 * RW_WRITE	The lock must be completely empty. We increment it with
 *		RWLOCK_WRLOCK and the proc pointer of the holder.
 *		Sets RWLOCK_WAIT|RWLOCK_WRWANT while waiting.
 * RW_READ	RWLOCK_WRLOCK|RWLOCK_WRWANT may not be set. We increment
 *		with RWLOCK_READ_INCR. RWLOCK_WAIT while waiting.
 */
static const struct rwlock_op {
	unsigned long inc;
	unsigned long check;
	unsigned long wait_set;
	long proc_mult;
	int wait_prio;
} rw_ops[] = {
	{	/* RW_WRITE */
		RWLOCK_WRLOCK,
		ULONG_MAX,
		RWLOCK_WAIT | RWLOCK_WRWANT,
		1,
		PLOCK - 4
	},
	{	/* RW_READ */
		RWLOCK_READ_INCR,
		RWLOCK_WRLOCK,
		RWLOCK_WAIT,
		0,
		PLOCK
	},
	{	/* RW_DOWNGRADE */
		RWLOCK_READ_INCR - RWLOCK_WRLOCK,
		0,
		0,
		-1,
		PLOCK
	},
};
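
/*
 * Worked example (editorial, not part of the original file): rw_enter()
 * computes "inc = op->inc + RW_PROC(curproc) * op->proc_mult" and, once
 * "owner & op->check" is clear, CASes the owner word from o to o + inc.
 * For RW_WRITE the check is ULONG_MAX, so the lock must be completely
 * idle, and an idle lock (0) becomes RW_PROC(curproc) | RWLOCK_WRLOCK.
 * For RW_DOWNGRADE, proc_mult is -1, so the holder's proc pointer and
 * RWLOCK_WRLOCK are subtracted while one RWLOCK_READ_INCR is added,
 * turning the write lock into a one-reader read lock in a single
 * atomic step.
 */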

#ifndef __HAVE_MD_RWLOCK
/*
 * Simple cases that should be in MD code and atomic.
 */
void
rw_enter_read(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	if (__predict_false((owner & RWLOCK_WRLOCK) ||
	    rw_cas(&rwl->rwl_owner, owner, owner + RWLOCK_READ_INCR)))
		rw_enter(rwl, RW_READ);
}

void
rw_enter_write(struct rwlock *rwl)
{
	struct proc *p = curproc;

	if (__predict_false(rw_cas(&rwl->rwl_owner, 0,
	    RW_PROC(p) | RWLOCK_WRLOCK)))
		rw_enter(rwl, RW_WRITE);
}

void
rw_exit_read(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_rdlock(rwl);

	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, owner - RWLOCK_READ_INCR)))
		rw_exit(rwl);
}

void
rw_exit_write(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;

	rw_assert_wrlock(rwl);

	if (__predict_false((owner & RWLOCK_WAIT) ||
	    rw_cas(&rwl->rwl_owner, owner, 0)))
		rw_exit(rwl);
}
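
#if 0
/*
 * Editorial usage sketch, compiled out and not part of the original
 * file.  The wrappers above attempt a single rw_cas() fast path and
 * fall back to the general rw_enter()/rw_exit() slow path on
 * contention; callers simply bracket their critical sections.
 * "example_lock" and "example_count" are hypothetical names.
 */
struct rwlock example_lock;
int example_count;

void
example_read_side(void)
{
	rw_enter_read(&example_lock);
	/* Shared section: several readers may be here at once. */
	(void)example_count;
	rw_exit_read(&example_lock);
}

void
example_write_side(void)
{
	rw_enter_write(&example_lock);
	/* Exclusive section: no other readers or writers. */
	example_count++;
	rw_exit_write(&example_lock);
}
#endif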

#ifndef rw_cas
int
rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	if (*p != o)
		return (1);
	*p = n;

	return (0);
}
#endif

#endif
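
#if 0
/*
 * Editorial sketch, compiled out and not part of the original file: an
 * atomic rw_cas() as an MD implementation might provide it.  This
 * assumes a compiler offering the __sync builtins; real MD versions
 * use the architecture's own compare-and-swap or load-linked/
 * store-conditional sequences.  Like the fallback above, it returns 0
 * on success and non-zero when the owner word did not hold the
 * expected value.
 */
int
rw_cas(volatile unsigned long *p, unsigned long o, unsigned long n)
{
	return (__sync_val_compare_and_swap(p, o, n) != o);
}
#endif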

#ifdef DIAGNOSTIC
/*
 * Put the diagnostic functions here to keep the main code free
 * from ifdef clutter.
 */
static void
rw_enter_diag(struct rwlock *rwl, int flags)
{
	switch (flags & RW_OPMASK) {
	case RW_WRITE:
	case RW_READ:
		if (RW_PROC(curproc) == RW_PROC(rwl->rwl_owner))
			panic("rw_enter: %s locking against myself",
			    rwl->rwl_name);
		break;
	case RW_DOWNGRADE:
		/*
		 * If we're downgrading, we must hold the write lock.
		 */
		if ((rwl->rwl_owner & RWLOCK_WRLOCK) == 0)
			panic("rw_enter: %s downgrade of non-write lock",
			    rwl->rwl_name);
		if (RW_PROC(curproc) != RW_PROC(rwl->rwl_owner))
			panic("rw_enter: %s downgrade, not holder",
			    rwl->rwl_name);
		break;

	default:
		panic("rw_enter: unknown op 0x%x", flags);
	}
}

#else
#define rw_enter_diag(r, f)
#endif

void
rw_init(struct rwlock *rwl, const char *name)
{
	rwl->rwl_owner = 0;
	rwl->rwl_name = name;
}
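
#if 0
/*
 * Editorial usage sketch, compiled out and not part of the original
 * file.  A lock is typically embedded in the data structure it
 * protects and initialized once, e.g. at attach or allocation time.
 * "example_softc" and "example_attach" are hypothetical names.
 */
struct example_softc {
	struct rwlock sc_lock;
	int sc_state;
};

void
example_attach(struct example_softc *sc)
{
	rw_init(&sc->sc_lock, "exlock");
	sc->sc_state = 0;
}
#endif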

int
rw_enter(struct rwlock *rwl, int flags)
{
	const struct rwlock_op *op;
	struct sleep_state sls;
	unsigned long inc, o;
	int error;

	op = &rw_ops[flags & RW_OPMASK];

	inc = op->inc + RW_PROC(curproc) * op->proc_mult;
retry:
	while (__predict_false(((o = rwl->rwl_owner) & op->check) != 0)) {
		unsigned long set = o | op->wait_set;
		int do_sleep;

		rw_enter_diag(rwl, flags);

		if (flags & RW_NOSLEEP)
			return (EBUSY);

		sleep_setup(&sls, rwl, op->wait_prio, rwl->rwl_name);
		if (flags & RW_INTR)
			sleep_setup_signal(&sls, op->wait_prio | PCATCH);

		do_sleep = !rw_cas(&rwl->rwl_owner, o, set);

		sleep_finish(&sls, do_sleep);
		if ((flags & RW_INTR) &&
		    (error = sleep_finish_signal(&sls)) != 0)
			return (error);
		if (flags & RW_SLEEPFAIL)
			return (EAGAIN);
	}

	if (__predict_false(rw_cas(&rwl->rwl_owner, o, o + inc)))
		goto retry;

	/*
	 * If the old lock had RWLOCK_WAIT and RWLOCK_WRLOCK set, we just
	 * downgraded a write lock and there may be readers waiting;
	 * wake them up so they can retry the lock.
	 */
	if (__predict_false((o & (RWLOCK_WRLOCK|RWLOCK_WAIT)) ==
	    (RWLOCK_WRLOCK|RWLOCK_WAIT)))
		wakeup(rwl);

	return (0);
}
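
#if 0
/*
 * Editorial usage sketch, compiled out and not part of the original
 * file.  The flag bits change how rw_enter() reports contention:
 * RW_NOSLEEP fails immediately with EBUSY, RW_INTR lets a caught
 * signal abort the sleep, and RW_SLEEPFAIL returns EAGAIN after the
 * sleep instead of retrying.  "example_lock" is a hypothetical name.
 */
extern struct rwlock example_lock;

int
example_try_write(void)
{
	int error;

	/* Poll for the write lock; do not sleep if it is busy. */
	if ((error = rw_enter(&example_lock, RW_WRITE | RW_NOSLEEP)) != 0)
		return (error);		/* EBUSY */

	/* ... modify the protected state ... */

	rw_exit(&example_lock);
	return (0);
}
#endif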

void
rw_exit(struct rwlock *rwl)
{
	unsigned long owner = rwl->rwl_owner;
	int wrlock = owner & RWLOCK_WRLOCK;
	unsigned long set;

	if (wrlock)
		rw_assert_wrlock(rwl);
	else
		rw_assert_rdlock(rwl);

	do {
		owner = rwl->rwl_owner;
		if (wrlock)
			set = 0;
		else
			set = (owner - RWLOCK_READ_INCR) &
				~(RWLOCK_WAIT|RWLOCK_WRWANT);
	} while (rw_cas(&rwl->rwl_owner, owner, set));

	if (owner & RWLOCK_WAIT)
		wakeup(rwl);
}
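
#if 0
/*
 * Editorial usage sketch, compiled out and not part of the original
 * file.  RW_DOWNGRADE converts a held write lock into a read lock
 * without ever releasing it, and rw_enter() wakes any waiters that
 * queued up behind the writer so readers can proceed.  "example_lock"
 * is a hypothetical name.
 */
extern struct rwlock example_lock;

void
example_downgrade(void)
{
	rw_enter_write(&example_lock);
	/* ... exclusive setup work ... */
	rw_enter(&example_lock, RW_DOWNGRADE);
	/* ... continue with other readers allowed in ... */
	rw_exit_read(&example_lock);
}
#endif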

#ifdef DIAGNOSTIC
void
rw_assert_wrlock(struct rwlock *rwl)
{
	if (!(rwl->rwl_owner & RWLOCK_WRLOCK))
		panic("%s: lock not held", rwl->rwl_name);

	if (RWLOCK_OWNER(rwl) != (struct proc *)RW_PROC(curproc))
		panic("%s: lock not held by this process", rwl->rwl_name);
}

void
rw_assert_rdlock(struct rwlock *rwl)
{
	if (!RWLOCK_OWNER(rwl) || (rwl->rwl_owner & RWLOCK_WRLOCK))
		panic("%s: lock not shared", rwl->rwl_name);
}

void
rw_assert_unlocked(struct rwlock *rwl)
{
	if (rwl->rwl_owner != 0L)
		panic("%s: lock held", rwl->rwl_name);
}
#endif