Lines matching "re" and "tuning" in module/zfs/rrwlock.c (OpenZFS re-entrant reader/writer lock)
From the CDDL license header:

 * or https://opensource.org/licenses/CDDL-1.0.
From the design comment at the top of the file:

 * This file contains the implementation of a re-entrant read
 * reader/writer lock (aka "rrwlock"): the holder of a read lock may
 * re-enter another read lock (re-entrant read) - even if there are
 * waiting writers.
 *
 * The rrwlock_t lock does not allow re-entrant writers, nor does it
 * allow a re-entrant mix of reads and writes (that is, it does not
 * allow a read holder to re-enter as a writer, or vice versa).
 *
 * Re-entrant readers are tracked through thread-specific data (tsd):
 * a per-thread list of rrw_node_t entries records which specific
 * lock (rrw_node_t::rn_rrl) the thread has grabbed. Since re-entering
 * is rare, the common case stays cheap: while no writer is waiting,
 * a reader just bumps the anonymous read count (rr_anon_rcount) - no
 * tsd node is created, so such a reader doesn't know if it is a
 * re-entrant lock. But since it may be one, it is allowed past a
 * waiting writer. Once a writer has announced itself, all subsequent
 * readers will be able to tell if they are a re-entrant lock (have a
 * rrw_node_t entry for the lock) or not. If they are a re-entrant
 * lock, they may proceed; otherwise they block until the writer is done.
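For orientation, here is a sketch of the two structures these routines manipulate, reconstructed from the fields used in the excerpts below; the exact field order and the kernel type names are assumptions, not a copy of rrwlock.h:

    /* Sketch only - reconstructed from usage below, not quoted from rrwlock.h. */
    typedef struct rrwlock {
        kmutex_t        rr_lock;           /* protects all fields below */
        kcondvar_t      rr_cv;             /* readers and writers sleep here */
        kthread_t       *rr_writer;        /* current writer, or NULL */
        zfs_refcount_t  rr_anon_rcount;    /* untracked (anonymous) readers */
        zfs_refcount_t  rr_linked_rcount;  /* tsd-tracked readers */
        boolean_t       rr_writer_wanted;  /* a writer is waiting */
        boolean_t       rr_track_all;      /* track every reader via tsd */
    } rrwlock_t;

    typedef struct rrw_node {
        struct rrw_node *rn_next;  /* next node in this thread's tsd list */
        rrwlock_t       *rn_rrl;   /* the lock this node represents */
        const void      *rn_tag;   /* caller-supplied tag (e.g. FTAG) */
    } rrw_node_t;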
In rrn_find():

    if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
        return (NULL);
    for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
        if (rn->rn_rrl == rrl)
            return (rn);
    }
In rrn_add():

    rn = kmem_alloc(sizeof (*rn), KM_SLEEP);
    rn->rn_rrl = rrl;
    rn->rn_next = tsd_get(rrw_tsd_key);    /* push onto the tsd list head */
    rn->rn_tag = tag;
    VERIFY(tsd_set(rrw_tsd_key, rn) == 0);
In rrn_find_and_remove():

    if (zfs_refcount_count(&rrl->rr_linked_rcount) == 0)
        return (B_FALSE);
    for (rn = tsd_get(rrw_tsd_key); rn != NULL; rn = rn->rn_next) {
        if (rn->rn_rrl == rrl && rn->rn_tag == tag) {
            if (prev != NULL)
                prev->rn_next = rn->rn_next;    /* unlink mid-list */
            else
                VERIFY(tsd_set(rrw_tsd_key, rn->rn_next) == 0);    /* unlink head */
            ...
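The same per-thread list can be reproduced in userspace; a minimal sketch using POSIX thread-specific data in place of the kernel tsd_* API, with hypothetical names (node_t, node_key, node_push, node_find) standing in for rrw_node_t, rrw_tsd_key, rrn_add and rrn_find:

    #include <pthread.h>
    #include <stdlib.h>

    typedef struct node {
        struct node *n_next;
        const void  *n_lock;    /* which lock this thread holds */
    } node_t;

    static pthread_key_t node_key;    /* plays the role of rrw_tsd_key */

    static void
    node_key_init(void)
    {
        /* Call once (e.g. via pthread_once). NULL destructor for brevity;
         * the real code registers rrw_tsd_destroy instead. */
        (void) pthread_key_create(&node_key, NULL);
    }

    static void
    node_push(const void *lock)
    {
        node_t *n = malloc(sizeof (*n));
        n->n_lock = lock;
        n->n_next = pthread_getspecific(node_key);    /* old list head */
        (void) pthread_setspecific(node_key, n);      /* new list head */
    }

    static node_t *
    node_find(const void *lock)
    {
        for (node_t *n = pthread_getspecific(node_key);
            n != NULL; n = n->n_next) {
            if (n->n_lock == lock)
                return (n);
        }
        return (NULL);
    }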
In rrw_init():

    mutex_init(&rrl->rr_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&rrl->rr_cv, NULL, CV_DEFAULT, NULL);
    rrl->rr_writer = NULL;
    zfs_refcount_create(&rrl->rr_anon_rcount);
    zfs_refcount_create(&rrl->rr_linked_rcount);
    rrl->rr_writer_wanted = B_FALSE;
    rrl->rr_track_all = track_all;
In rrw_destroy():

    mutex_destroy(&rrl->rr_lock);
    cv_destroy(&rrl->rr_cv);
    ASSERT(rrl->rr_writer == NULL);
    zfs_refcount_destroy(&rrl->rr_anon_rcount);
    zfs_refcount_destroy(&rrl->rr_linked_rcount);
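A hedged usage sketch of the lifecycle these two routines bracket; the rrw_enter()/rrw_exit()/FTAG convention matches the OpenZFS API, but the surrounding context is hypothetical:

    rrwlock_t rrl;

    rrw_init(&rrl, B_FALSE);              /* track_all off: cheap readers */
    rrw_enter(&rrl, RW_READER, FTAG);     /* first read hold */
    rrw_enter(&rrl, RW_READER, FTAG);     /* re-entrant read: permitted */
    rrw_exit(&rrl, FTAG);
    rrw_exit(&rrl, FTAG);
    rrw_destroy(&rrl);                    /* ASSERTs the lock is idle */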
In rrw_enter_read_impl():

    mutex_enter(&rrl->rr_lock);
    /* Fast path: no writer, no waiting writer, no tracking. */
    if (rrl->rr_writer == NULL && !rrl->rr_writer_wanted &&
        !rrl->rr_track_all) {
        rrl->rr_anon_rcount.rc_count++;
        mutex_exit(&rrl->rr_lock);
        return;
    }

    ASSERT(rrl->rr_writer != curthread);
    ASSERT(zfs_refcount_count(&rrl->rr_anon_rcount) >= 0);

    while (rrl->rr_writer != NULL || (rrl->rr_writer_wanted &&
        zfs_refcount_is_zero(&rrl->rr_anon_rcount) && !prio &&
        rrn_find(rrl) == NULL))
        cv_wait(&rrl->rr_cv, &rrl->rr_lock);

    if (rrl->rr_writer_wanted || rrl->rr_track_all) {
        /* may or may not be a re-entrant enter */
        rrn_add(rrl, tag);
        (void) zfs_refcount_add(&rrl->rr_linked_rcount, tag);
    } else {
        (void) zfs_refcount_add(&rrl->rr_anon_rcount, tag);
    }
    ASSERT(rrl->rr_writer == NULL);
    mutex_exit(&rrl->rr_lock);
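The reader's wait condition is easiest to read as a predicate. This is a hedged restatement of the while-loop above with a hypothetical helper name, not code from the file:

    /* Hypothetical restatement: may a reader proceed right now? */
    static boolean_t
    reader_may_enter(rrwlock_t *rrl, boolean_t prio)
    {
        if (rrl->rr_writer != NULL)
            return (B_FALSE);    /* an active writer always blocks readers */
        if (!rrl->rr_writer_wanted)
            return (B_TRUE);     /* nobody is waiting to write */
        /*
         * A writer is waiting: only priority readers, re-entrant readers
         * (rrn_find() != NULL), and readers joining a still-nonzero
         * anonymous-read crowd may enter ahead of it.
         */
        return (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
            prio || rrn_find(rrl) != NULL);
    }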
In rrw_enter_write():

    mutex_enter(&rrl->rr_lock);
    ASSERT(rrl->rr_writer != curthread);

    while (zfs_refcount_count(&rrl->rr_anon_rcount) > 0 ||
        zfs_refcount_count(&rrl->rr_linked_rcount) > 0 ||
        rrl->rr_writer != NULL) {
        rrl->rr_writer_wanted = B_TRUE;
        cv_wait(&rrl->rr_cv, &rrl->rr_lock);
    }
    rrl->rr_writer_wanted = B_FALSE;
    rrl->rr_writer = curthread;
    mutex_exit(&rrl->rr_lock);
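A hedged write-side usage sketch (the surrounding context is hypothetical); note that writers are not re-entrant, which the ASSERT above enforces:

    rrw_enter(&rrl, RW_WRITER, FTAG);    /* blocks until all readers drain */
    ASSERT(rrw_held(&rrl, RW_WRITER));
    /* ... modify the protected state ... */
    rrw_exit(&rrl, FTAG);                /* clears rr_writer, wakes waiters */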
In rrw_exit():

    mutex_enter(&rrl->rr_lock);
    /* Fast path: anonymous reader, nothing tracked, no writer. */
    if (!rrl->rr_writer && rrl->rr_linked_rcount.rc_count == 0) {
        rrl->rr_anon_rcount.rc_count--;
        if (rrl->rr_anon_rcount.rc_count == 0)
            cv_broadcast(&rrl->rr_cv);
        mutex_exit(&rrl->rr_lock);
        return;
    }

    ASSERT(!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
        !zfs_refcount_is_zero(&rrl->rr_linked_rcount) ||
        rrl->rr_writer != NULL);

    if (rrl->rr_writer == NULL) {
        int64_t count;
        if (rrn_find_and_remove(rrl, tag)) {
            count = zfs_refcount_remove(
                &rrl->rr_linked_rcount, tag);
        } else {
            ASSERT(!rrl->rr_track_all);
            count = zfs_refcount_remove(&rrl->rr_anon_rcount, tag);
        }
        if (count == 0)
            cv_broadcast(&rrl->rr_cv);
    } else {
        ASSERT(rrl->rr_writer == curthread);
        ASSERT(zfs_refcount_is_zero(&rrl->rr_anon_rcount) &&
            zfs_refcount_is_zero(&rrl->rr_linked_rcount));
        rrl->rr_writer = NULL;
        cv_broadcast(&rrl->rr_cv);
    }
    mutex_exit(&rrl->rr_lock);
In rrw_held():

    mutex_enter(&rrl->rr_lock);
    if (rw == RW_WRITER)
        held = (rrl->rr_writer == curthread);
    else
        held = (!zfs_refcount_is_zero(&rrl->rr_anon_rcount) ||
            rrn_find(rrl) != NULL);
    mutex_exit(&rrl->rr_lock);
In rrw_tsd_destroy():

    panic("thread %p terminating with rrw lock %p held",
        (void *)curthread, (void *)rn->rn_rrl);
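This destructor fires if a thread exits while a node is still on its tsd list; it is registered once against the key. The registration site is elsewhere in the tree and quoted here from memory, so treat it as an assumption:

    tsd_create(&rrw_tsd_key, rrw_tsd_destroy);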
From the comment introducing the rrm ("reader-mostly") variant:

 * A reader-mostly lock implementation, tuned above reader-writer locks
 * for highly parallel read acquisitions, while pessimizing writes.

The idea, visible in the loops below, is to spread readers across a fixed array of rrwlock_t locks: a reader takes only the lock its thread hashes to, while a writer must take every lock in the array.
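A sketch of the corresponding type, assuming the structure implied by the loops below; the exact lock count (a small prime, 17 in the OpenZFS headers as best I recall) is an assumption:

    #define RRM_NUM_LOCKS   17    /* assumed value; see rrwlock.h */
    typedef struct rrmlock {
        rrwlock_t locks[RRM_NUM_LOCKS];    /* readers pick one, writers take all */
    } rrmlock_t;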
In rrm_init():

    for (i = 0; i < RRM_NUM_LOCKS; i++)
        rrw_init(&rrl->locks[i], track_all);
In rrm_destroy():

    for (i = 0; i < RRM_NUM_LOCKS; i++)
        rrw_destroy(&rrl->locks[i]);
From the comment above the RRM_TD_LOCK() thread-to-lock mapping:

 * We examine only the low 32 bits of the thread pointer, because 32-bit
 * division is faster than 64-bit division, and the high 32 bits have
 * little entropy anyway.

In rrm_enter_read():

    rrw_enter_read(&rrl->locks[RRM_TD_LOCK()], tag);
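The mapping itself reduces to a modulo over the truncated thread pointer; a sketch of the macro consistent with that comment (the exact definition is from memory, treat it as an assumption):

    #define RRM_TD_LOCK()   (((uint32_t)(uintptr_t)(curthread)) % RRM_NUM_LOCKS)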
In rrm_enter_write():

    for (i = 0; i < RRM_NUM_LOCKS; i++)
        rrw_enter_write(&rrl->locks[i]);
In rrm_exit():

    if (rrl->locks[0].rr_writer == curthread) {
        for (i = 0; i < RRM_NUM_LOCKS; i++)
            rrw_exit(&rrl->locks[i], tag);
    } else {
        rrw_exit(&rrl->locks[RRM_TD_LOCK()], tag);
    }
In rrm_held():

    if (rw == RW_WRITER)
        return (rrw_held(&rrl->locks[0], rw));
    else
        return (rrw_held(&rrl->locks[RRM_TD_LOCK()], rw));
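The reader-mostly pattern ports cleanly to userspace. A minimal sketch on POSIX rwlocks, with every name hypothetical: readers hash their thread handle to one slot, writers take every slot in a fixed order (which is what prevents two writers from deadlocking against each other). The pthread_self() cast assumes an arithmetic pthread_t, as on glibc; each slot still needs pthread_rwlock_init() before use:

    #include <pthread.h>
    #include <stdint.h>

    #define NLOCKS  17    /* small prime, mirroring RRM_NUM_LOCKS */

    typedef struct rmlock {
        pthread_rwlock_t slots[NLOCKS];
    } rmlock_t;

    static unsigned
    rm_slot(void)
    {
        /* Low bits of the thread handle stand in for the thread pointer. */
        return ((uint32_t)(uintptr_t)pthread_self() % NLOCKS);
    }

    static void
    rm_read_lock(rmlock_t *rm)
    {
        (void) pthread_rwlock_rdlock(&rm->slots[rm_slot()]);
    }

    static void
    rm_read_unlock(rmlock_t *rm)
    {
        /* Must run on the same thread that took the read lock. */
        (void) pthread_rwlock_unlock(&rm->slots[rm_slot()]);
    }

    static void
    rm_write_lock(rmlock_t *rm)
    {
        /* Fixed order across all slots avoids writer-vs-writer deadlock. */
        for (int i = 0; i < NLOCKS; i++)
            (void) pthread_rwlock_wrlock(&rm->slots[i]);
    }

    static void
    rm_write_unlock(rmlock_t *rm)
    {
        for (int i = 0; i < NLOCKS; i++)
            (void) pthread_rwlock_unlock(&rm->slots[i]);
    }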