xref: /netbsd-src/external/cddl/osnet/dist/uts/common/fs/zfs/rrwlock.c (revision ba2539a9805a0544ff82c0003cc02fe1eee5603d)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/refcount.h>
#include <sys/rrwlock.h>

/*
 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock").
 *
 * This is a normal reader/writer lock with the additional feature
 * of allowing threads that have already obtained a read lock to
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * Callers that have not obtained a read lock give waiting writers priority.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a caller that already holds a read lock to then grab a
 * write lock without first dropping all read locks, and vice versa).
 *
 * The rrwlock_t uses tsd (thread specific data) to keep a list of
 * nodes (rrw_node_t), where each node keeps track of which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed.  Since re-entering
 * should be rare, a thread that grabs multiple reads on the same rrwlock_t
 * will store multiple rrw_node_ts with the same 'rn_rrl'.  Nodes on the
 * tsd list can represent different rrwlock_ts.  This allows a thread
 * to hold read locks on multiple distinct rrwlock_ts at the same time.
 *
 * Since using tsd incurs some overhead, the rrwlock_t only keeps
 * tsd data when writers are waiting.  If no writers are waiting, then
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no tsd
 * is needed.  Once a writer attempts to grab the lock, readers then
 * keep tsd data and bump the linked readers count (rr_linked_rcount).
 *
 * If there are waiting writers and there are anonymous readers, then a
 * new reader cannot tell whether its acquisition is re-entrant.  But since
 * it may be one, we allow the read to proceed (otherwise it could deadlock).
 * Since readers no longer bump the anonymous count once writers are waiting,
 * the anonymous readers will eventually flush themselves out.  At that point,
 * readers can tell whether their acquisition is re-entrant (they have a
 * rrw_node_t entry for the lock) or not.  If it is re-entrant, then
 * we must let them proceed.  If it is not, then the reader blocks behind
 * the waiting writers.  Hence, we do not starve writers.
 */
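
/*
 * Example usage - a minimal sketch, assuming the FTAG hold-tag convention
 * from sys/zfs_context.h (the lock variable here is hypothetical):
 *
 *	rrwlock_t lock;
 *
 *	rrw_init(&lock, B_FALSE);
 *	rrw_enter(&lock, RW_READER, FTAG);	(first read hold)
 *	rrw_enter(&lock, RW_READER, FTAG);	(re-entrant read; proceeds
 *						 even with waiting writers)
 *	rrw_exit(&lock, FTAG);
 *	rrw_exit(&lock, FTAG);
 *	rrw_destroy(&lock);
 */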

/* global key for TSD */
uint_t rrw_tsd_key;

typedef struct rrw_node {
	struct rrw_node *rn_next;
	rrwlock_t *rn_rrl;
	void *rn_tag;
} rrw_node_t;

static rrw_node_t *
rrn_find(rrwlock_t *rrl)
{
	rrw_node_t *rn;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (NULL);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl)
			return (rn);
	}
	return (NULL);
}

/*
 * Add a node to the head of the singly linked list.
 */
static void
rrn_add(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;

	rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
	rn->rn_rrl = rrl;
	rn->rn_next = tsd_get(rrw_tsd_key);
	rn->rn_tag = tag;
	VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
}

/*
 * If a node is found for 'rrl', then remove the node from this
 * thread's list and return TRUE; otherwise return FALSE.
 */
static boolean_t
rrn_find_and_remove(rrwlock_t *rrl, void *tag)
{
	rrw_node_t *rn;
	rrw_node_t *prev = NULL;

	if (refcount_count(&rrl->rr_linked_rcount) == 0)
		return (B_FALSE);

	for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
		if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
			if (prev)
				prev->rn_next = rn->rn_next;
			else
				VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);
			kmem_free(rn, sizeof (*rn));
			return (B_TRUE);
		}
		prev = rn;
	}
	return (B_FALSE);
}

void
rrw_init(rrwlock_t *rrl, boolean_t track_all)
{
	mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
	rrl->rr_writer = NULL;
	refcount_create(&rrl->rr_anon_rcount);
	refcount_create(&rrl->rr_linked_rcount);
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_track_all = track_all;
}

void
rrw_destroy(rrwlock_t *rrl)
{
	mutex_destroy(&rrl->rr_lock);
	cv_destroy(&rrl->rr_cv);
	ASSERT(rrl->rr_writer == NULL);
	refcount_destroy(&rrl->rr_anon_rcount);
	refcount_destroy(&rrl->rr_linked_rcount);
}

static void
rrw_enter_read_impl(rrwlock_t *rrl, boolean_t prio, void *tag)
{
	mutex_enter(&rrl->rr_lock);
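	/*
	 * Fast path for non-DEBUG kernel builds: with no writer, no
	 * waiting writer, and no full tracking, just bump the anonymous
	 * read count directly and skip the refcount bookkeeping.
	 */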
#if !defined(DEBUG) && defined(_KERNEL)
	if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
	    !rrl->rr_track_all) {
		rrl->rr_anon_rcount.rc_count++;
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__rdmiss);
#endif
	ASSERT(rrl->rr_writer != curthread);
	ASSERT(refcount_count(&rrl->rr_anon_rcount) >= 0);

	while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
	    refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
	    rrn_find(rrl) == NULL))
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);

	if (rrl->rr_writer_wanted || rrl->rr_track_all) {
		/* may or may not be a re-entrant enter */
		rrn_add(rrl, tag);
		(void) refcount_add(&rrl->rr_linked_rcount, tag);
	} else {
		(void) refcount_add(&rrl->rr_anon_rcount, tag);
	}
	ASSERT(rrl->rr_writer == NULL);
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter_read(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_FALSE, tag);
}

/*
 * Take a read lock even if there are pending write lock requests.  If we
 * want to take a lock re-entrantly, but from different threads (that have
 * a relationship to each other), the normal detection mechanism to overrule
 * the pending writer does not work, so we have to give an explicit hint here.
 */
void
rrw_enter_read_prio(rrwlock_t *rrl, void *tag)
{
	rrw_enter_read_impl(rrl, B_TRUE, tag);
}

void
rrw_enter_write(rrwlock_t *rrl)
{
	mutex_enter(&rrl->rr_lock);
	ASSERT(rrl->rr_writer != curthread);

	while (refcount_count(&rrl->rr_anon_rcount) > 0 ||
	    refcount_count(&rrl->rr_linked_rcount) > 0 ||
	    rrl->rr_writer != NULL) {
		rrl->rr_writer_wanted = B_TRUE;
		cv_wait(&rrl->rr_cv, &rrl->rr_lock);
	}
	rrl->rr_writer_wanted = B_FALSE;
	rrl->rr_writer = curthread;
	mutex_exit(&rrl->rr_lock);
}

void
rrw_enter(rrwlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrw_enter_read(rrl, tag);
	else
		rrw_enter_write(rrl);
}

void
rrw_exit(rrwlock_t *rrl, void *tag)
{
	mutex_enter(&rrl->rr_lock);
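	/*
	 * Fast path for non-DEBUG kernel builds: with no writer and no
	 * linked (tsd-tracked) readers, drop an anonymous read hold
	 * directly, waking waiters when the count hits zero.
	 */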
#if !defined(DEBUG) && defined(_KERNEL)
	if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
		rrl->rr_anon_rcount.rc_count--;
		if (rrl->rr_anon_rcount.rc_count == 0)
			cv_broadcast(&rrl->rr_cv);
		mutex_exit(&rrl->rr_lock);
		return;
	}
	DTRACE_PROBE(zfs__rrwfastpath__exitmiss);
#endif
	ASSERT(!refcount_is_zero(&rrl->rr_anon_rcount) ||
	    !refcount_is_zero(&rrl->rr_linked_rcount) ||
	    rrl->rr_writer != NULL);

	if (rrl->rr_writer == NULL) {
		int64_t count;
		if (rrn_find_and_remove(rrl, tag)) {
			count = refcount_remove(&rrl->rr_linked_rcount, tag);
		} else {
			ASSERT(!rrl->rr_track_all);
			count = refcount_remove(&rrl->rr_anon_rcount, tag);
		}
		if (count == 0)
			cv_broadcast(&rrl->rr_cv);
	} else {
		ASSERT(rrl->rr_writer == curthread);
		ASSERT(refcount_is_zero(&rrl->rr_anon_rcount) &&
		    refcount_is_zero(&rrl->rr_linked_rcount));
		rrl->rr_writer = NULL;
		cv_broadcast(&rrl->rr_cv);
	}
	mutex_exit(&rrl->rr_lock);
}

/*
 * If the lock was created with track_all, rrw_held(RW_READER) will return
 * B_TRUE iff the current thread has the lock for reader.  Otherwise it may
 * return B_TRUE if any thread has the lock for reader.
 */
boolean_t
rrw_held(rrwlock_t *rrl, krw_t rw)
{
	boolean_t held;

	mutex_enter(&rrl->rr_lock);
	if (rw == RW_WRITER) {
		held = (rrl->rr_writer == curthread);
	} else {
		held = (!refcount_is_zero(&rrl->rr_anon_rcount) ||
		    rrn_find(rrl) != NULL);
	}
	mutex_exit(&rrl->rr_lock);

	return (held);
}

void
rrw_tsd_destroy(void *arg)
{
	rrw_node_t *rn = arg;
	if (rn != NULL) {
		panic("thread %p terminating with rrw lock %p held",
		    (void *)curthread, (void *)rn->rn_rrl);
	}
}
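
/*
 * A note on setup: rrw_tsd_key is registered once at module load, outside
 * this file.  A minimal sketch of that registration (illustrative; the
 * actual call site lives elsewhere in ZFS) is:
 *
 *	tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
 *
 * which makes rrw_tsd_destroy() run for any thread that exits while its
 * tsd slot is still set, catching locks leaked across thread exit.
 */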

/*
 * A reader-mostly lock implementation, tuned above reader-writer locks
 * for highly parallel read acquisitions, while pessimizing writes.
 *
 * The idea is to split a single busy lock into an array of locks, so that
 * each reader can lock just one of them for read, depending on the result
 * of a simple hash function.  That proportionally reduces lock congestion.
 * A writer, on the other hand, has to sequentially acquire write on all
 * the locks.  That makes write acquisition proportionally slower, but in
 * places where it is used (filesystem unmount) performance is not critical.
 *
 * All the functions below are direct wrappers around the functions above.
 */
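/*
 * For example - a minimal sketch of typical use, again assuming the FTAG
 * hold-tag convention and a hypothetical lock variable:
 *
 *	rrmlock_t lock;
 *
 *	rrm_init(&lock, B_FALSE);
 *	rrm_enter(&lock, RW_READER, FTAG);	(locks one array slot)
 *	rrm_exit(&lock, FTAG);			(same thread, same slot)
 *	rrm_enter(&lock, RW_WRITER, FTAG);	(locks every slot in order)
 *	rrm_exit(&lock, FTAG);
 *	rrm_destroy(&lock);
 */
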
void
rrm_init(rrmlock_t *rrl, boolean_t track_all)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_init(&rrl->locks[i], track_all);
}

void
rrm_destroy(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_destroy(&rrl->locks[i]);
}

void
rrm_enter(rrmlock_t *rrl, krw_t rw, void *tag)
{
	if (rw == RW_READER)
		rrm_enter_read(rrl, tag);
	else
		rrm_enter_write(rrl);
}

/*
 * This maps the current thread to a specific lock.  Note that the lock
 * must be released by the same thread that acquired it.  We do this
 * mapping by taking the thread pointer mod a prime number.  We examine
 * only the low 32 bits of the thread pointer, because 32-bit division
 * is faster than 64-bit division, and the high 32 bits have little
 * entropy anyway.
 */
#define	RRM_TD_LOCK()	(((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)
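/*
 * For example, assuming RRM_NUM_LOCKS is the prime 17 (its value in
 * sys/rrwlock.h), a thread whose pointer has low 32 bits 0x12345678
 * (305419896) maps to slot 305419896 % 17 == 4, so that thread uses
 * locks[4] for each of its read enters and exits.
 */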

void
rrm_enter_read(rrmlock_t *rrl, void *tag)
{
	rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
}

void
rrm_enter_write(rrmlock_t *rrl)
{
	int i;

	for (i = 0; i < RRM_NUM_LOCKS; i++)
		rrw_enter_write(&rrl->locks[i]);
}

void
rrm_exit(rrmlock_t *rrl, void *tag)
{
	int i;

	if (rrl->locks[0].rr_writer == curthread) {
		for (i = 0; i < RRM_NUM_LOCKS; i++)
			rrw_exit(&rrl->locks[i], tag);
	} else {
		rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
	}
}

boolean_t
rrm_held(rrmlock_t *rrl, krw_t rw)
{
	if (rw == RW_WRITER) {
		return (rrw_held(&rrl->locks[0], rw));
	} else {
		return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
	}
}