/*
 * Copyright (c) 2007-2011 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * HAMMER structural locking
 */

#include "hammer.h"

/*
 * Acquire an exclusive lock, blocking until it is available.  The lock
 * may be acquired recursively by the thread that already owns it.  The
 * caller must already hold a structural reference on the lock.
 */
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				hdkprintf("held by %p\n", lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					hdkprintf("try again\n");
			}
		}
	}
}
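
/*
 * Usage sketch (illustrative only, not compiled here): "node" is a
 * hypothetical structure embedding a hammer_lock.  A caller must already
 * hold a structural reference (the KKASSERT on lock->refs above enforces
 * this) before locking:
 *
 *	hammer_ref(&node->lock);
 *	hammer_lock_ex_ident(&node->lock, "mylk");
 *	...modify the structure...
 *	hammer_unlock(&node->lock);
 *	hammer_rel(&node->lock);
 *
 * The owner may recurse; each additional acquisition bumps the count and
 * must be paired with its own hammer_unlock().
 */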

/*
 * Try to obtain an exclusive lock without blocking.  Returns 0 on
 * success and EAGAIN if the lock is held by another thread.  Recursive
 * acquisition by the current owner succeeds.
 */
int
hammer_lock_ex_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	int error;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}
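
/*
 * Non-blocking sketch (illustrative): a caller typically backs off on
 * EAGAIN, e.g. to avoid deadlocking against a thread that holds this
 * lock while waiting on one of ours:
 *
 *	if (hammer_lock_ex_try(&node->lock) == 0) {
 *		...modify the structure...
 *		hammer_unlock(&node->lock);
 *	} else {
 *		...back off, release other locks, retry later...
 *	}
 */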

/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	const char *ident = "hmrlck";

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case: the caller already holds the
			 * lock exclusively.  Drop into the kernel debugger
			 * for now; continuing from the debugger proceeds
			 * with the exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				break;
			}
		} else {
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
		}
	}
}

/*
 * Try to obtain a shared lock without blocking.  Returns 0 on success
 * and EAGAIN if the lock is held exclusively by another thread.
 */
int
hammer_lock_sh_try(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				error = 0;
				break;
			}
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case: the caller already holds the
			 * lock exclusively.  Drop into the kernel debugger
			 * for now; continuing from the debugger proceeds
			 * with the exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				error = 0;
				break;
			}
		} else {
			error = EAGAIN;
			break;
		}
	}
	return (error);
}
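
/*
 * Shared-lock sketch (illustrative): shared holders nest freely, and
 * every successful hammer_lock_sh()/hammer_lock_sh_try() must be paired
 * with a hammer_unlock(), which drops one count:
 *
 *	hammer_lock_sh(&node->lock);
 *	...read-only access...
 *	hammer_unlock(&node->lock);
 */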

/*
 * Upgrade a shared lock to an exclusively held lock.  This function will
 * return EDEADLK if there is more than one shared holder.
 *
 * No error occurs and no action is taken if the lock is already exclusively
 * held by the caller.  If the lock is not held at all or held exclusively
 * by someone else, this function will panic.
 */
int
hammer_lock_upgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	int error;

	for (;;) {
		lv = lock->lockval;

		if ((lv & ~HAMMER_LOCKF_WANTED) == shcount) {
			nlv = lv | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				error = 0;
				break;
			}
		} else if (lv & HAMMER_LOCKF_EXCLUSIVE) {
			if (lock->lowner != curthread)
				hpanic("illegal state");
			error = 0;
			break;
		} else if ((lv & ~HAMMER_LOCKF_WANTED) == 0) {
			hpanic("lock is not held");
			/* NOT REACHED */
			error = EDEADLK;
			break;
		} else {
			error = EDEADLK;
			break;
		}
	}
	return (error);
}

/*
 * Downgrade an exclusively held lock to a shared lock.
 */
void
hammer_lock_downgrade(struct hammer_lock *lock, int shcount)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT((lock->lockval & ~HAMMER_LOCKF_WANTED) ==
		 (HAMMER_LOCKF_EXCLUSIVE | shcount));
	KKASSERT(lock->lowner == td);

	/*
	 * NOTE: Must clear owner before releasing exclusivity
	 */
	lock->lowner = NULL;

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
			if (lv & HAMMER_LOCKF_WANTED)
				wakeup(&lock->lockval);
			break;
		}
	}
}
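
/*
 * Upgrade/downgrade sketch (illustrative, assuming the caller is the
 * only shared holder, i.e. shcount == 1).  EDEADLK means other shared
 * holders exist and the caller must back out rather than block:
 *
 *	hammer_lock_sh(&node->lock);
 *	if (hammer_lock_upgrade(&node->lock, 1) == 0) {
 *		...modify the structure...
 *		hammer_lock_downgrade(&node->lock, 1);
 *	}
 *	hammer_unlock(&node->lock);
 */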

/*
 * Release a shared or exclusive lock, dropping one recursion count.
 * The final release wakes up any waiters.
 */
void
hammer_unlock(struct hammer_lock *lock)
{
	thread_t td __debugvar = curthread;
	u_int lv;
	u_int nlv;

	lv = lock->lockval;
	KKASSERT(lv != 0);
	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		KKASSERT(lock->lowner == td);

	for (;;) {
		lv = lock->lockval;
		nlv = lv & ~(HAMMER_LOCKF_EXCLUSIVE | HAMMER_LOCKF_WANTED);
		if (nlv > 1) {
			nlv = lv - 1;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (nlv == 1) {
			nlv = 0;
			if (lv & HAMMER_LOCKF_EXCLUSIVE)
				lock->lowner = NULL;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (lv & HAMMER_LOCKF_WANTED)
					wakeup(&lock->lockval);
				break;
			}
		} else {
			hpanic("lock %p is not held", lock);
		}
	}
}

/*
 * The calling thread must be holding a shared or exclusive lock.
 * Returns < 0 if the lock is held shared, and > 0 if it is held
 * exclusively.
 */
int
hammer_lock_status(struct hammer_lock *lock)
{
	u_int lv = lock->lockval;

	if (lv & HAMMER_LOCKF_EXCLUSIVE)
		return(1);
	else if (lv)
		return(-1);
	hpanic("lock must be held: %p", lock);
}

/*
 * Bump the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be set on a 0->1
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 * The caller can interlock it later on to deal with serialization.
 *
 * MPSAFE
 */
void
hammer_ref(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 0) {
			nlv = (lv + 1) | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			nlv = (lv + 1);
			KKASSERT((int)nlv > 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
	/* not reached */
}

/*
 * Drop the ref count for a lock (not the excl/share count, but a separate
 * structural reference count).  The CHECK flag will be cleared on a 1->0
 * transition.
 *
 * This function does nothing to serialize races between multiple threads.
 *
 * MPSAFE
 */
void
hammer_rel(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			nlv = (lv - 1) & ~HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		} else {
			KKASSERT((int)lv > 0);
			nlv = (lv - 1);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return;
		}
	}
	/* not reached */
}
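
/*
 * Reference-count sketch (illustrative): refs keep the structure from
 * being reclaimed and are independent of the excl/share lockval above.
 * The CHECK flag raised on the 0->1 transition tells a later
 * interlocking path that the structure may need (re)validation:
 *
 *	hammer_ref(&node->lock);	(may set CHECK on 0->1)
 *	...use the structure...
 *	hammer_rel(&node->lock);	(clears CHECK on 1->0)
 */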

/*
 * The hammer_*_interlock() and hammer_*_interlock_done() functions are
 * more sophisticated versions which handle MP transition races and block
 * when necessary.
 *
 * hammer_ref_interlock() bumps the ref-count and conditionally acquires
 * the interlock for 0->1 transitions or if the CHECK is found to be set.
 *
 * This case will return 1, the interlock will be held, and the CHECK
 * bit also set.  Other threads attempting to ref will see the CHECK bit
 * and block until we clean up.
 *
 * 0 is returned for transitions other than 0->1 when the CHECK bit
 * is not found to be set, or if the function loses the race with another
 * thread.
 *
 * 1 is only returned to one thread and the others will block.
 * Effectively a 1 indicator means 'someone transitioned 0->1
 * and you are the first guy to successfully lock it after that, so you
 * need to check'.  Due to races the ref-count may be greater than 1 upon
 * return.
 *
 * MPSAFE
 */
int
hammer_ref_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	/*
	 * Integrated reference count bump, lock, and check, with hot-path.
	 *
	 * (a) Return 1	(+LOCKED, +CHECK)	0->1 transition
	 * (b) Return 0 (-LOCKED, -CHECK)	N->N+1 transition
	 * (c) Break out (+CHECK)		Check condition and cannot lock
	 * (d) Return 1 (+LOCKED, +CHECK)	Successfully locked
	 */
	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			nlv = (lv + 1);
			if ((lv & ~HAMMER_REFS_FLAGS) == 0)
				nlv |= HAMMER_REFS_CHECK;
			if ((nlv & HAMMER_REFS_CHECK) == 0) {
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					return(0);
			} else if (lv & HAMMER_REFS_LOCKED) {
				/* CHECK also set here */
				if (atomic_cmpset_int(&lock->refs, lv, nlv))
					break;
			} else {
				/* CHECK also set here */
				nlv |= HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			}
		}
	}

	/*
	 * Deferred check condition because we were unable to acquire the
	 * lock.  We must block until the check condition is cleared due
	 * to a race with another thread, or we are able to acquire the
	 * lock.
	 *
	 * (a) Return 0	(-CHECK)		Another thread handled it
	 * (b) Return 1 (+LOCKED, +CHECK)	We handled it.
	 */
	for (;;) {
		lv = lock->refs;
		if ((lv & HAMMER_REFS_CHECK) == 0)
			return(0);
		if (lv & HAMMER_REFS_LOCKED) {
			tsleep_interlock(&lock->refs, 0);
			nlv = (lv | HAMMER_REFS_WANTED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "h1lk", 0);
		} else {
			/* CHECK also set here */
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		}
	}
	/* not reached */
}

/*
 * This is the same as hammer_ref_interlock() but asserts that the
 * 0->1 transition is always true, thus the lock must have no references
 * on entry or have CHECK set, and will have one reference with the
 * interlock held on return.  It must also not be interlocked on entry
 * by anyone.
 *
 * NOTE that CHECK will never be found set when the ref-count is 0.
 *
 * 1 is always returned to match the API for hammer_ref_interlock().
 * This function returns with one ref, the lock held, and the CHECK bit set.
 */
int
hammer_ref_interlock_true(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;

		if (lv) {
			hpanic("bad lock %p %08x", lock, lock->refs);
		}
		nlv = 1 | HAMMER_REFS_LOCKED | HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			lock->rowner = curthread;
			return (1);
		}
	}
}

/*
 * Unlock the interlock acquired by hammer_ref_interlock() and clear the
 * CHECK flag.  The ref-count remains unchanged.
 *
 * This routine is called in the load path when the load succeeds.
 */
void
hammer_ref_interlock_done(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~HAMMER_REFS_FLAGS;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}
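
/*
 * Load-path sketch (illustrative, load_or_validate() is a hypothetical
 * helper): a return of 1 from hammer_ref_interlock() means this thread
 * won both the right and the obligation to validate or load the
 * structure; a return of 0 means it was already good:
 *
 *	if (hammer_ref_interlock(&node->lock)) {
 *		error = load_or_validate(node);
 *		if (error == 0)
 *			hammer_ref_interlock_done(&node->lock);
 *		else
 *			...failure path, see hammer_rel_interlock()...
 *	}
 */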

/*
 * hammer_rel_interlock() works a bit differently in that it must
 * acquire the lock in tandem with a 1->0 transition.  CHECK is
 * not used.
 *
 * 1 is returned on 1->0 transitions with the lock held on return
 * and 0 is returned otherwise with the lock not held.
 *
 * It is important to note that the refs are not stable and may
 * increase while we hold the lock; the 1 indication only means
 * that we transitioned 1->0, not necessarily that we stayed at 0.
 *
 * Another thread bumping refs while we hold the lock will set CHECK,
 * causing one of the competing hammer_ref_interlock() calls to
 * return 1 after we release our lock.
 *
 * MPSAFE
 */
int
hammer_rel_interlock(struct hammer_lock *lock, int locked)
{
	u_int lv;
	u_int nlv;

	/*
	 * In locked mode (failure/unload path) we release the
	 * ref-count but leave it locked.
	 */
	if (locked) {
		hammer_rel(lock);
		return(1);
	}

	/*
	 * Integrated reference count drop with LOCKED, plus the hot-path
	 * returns.
	 */
	for (;;) {
		lv = lock->refs;

		if (lv == 1) {
			nlv = 0 | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else if ((lv & ~HAMMER_REFS_FLAGS) == 1) {
			if ((lv & HAMMER_REFS_LOCKED) == 0) {
				nlv = (lv - 1) | HAMMER_REFS_LOCKED;
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					lock->rowner = curthread;
					return(1);
				}
			} else {
				nlv = lv | HAMMER_REFS_WANTED;
				tsleep_interlock(&lock->refs, 0);
				if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
					tsleep(&lock->refs, PINTERLOCKED,
					       "h0lk", 0);
				}
			}
		} else {
			nlv = (lv - 1);
			KKASSERT((int)nlv >= 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				return(0);
		}
	}
	/* not reached */
}

/*
 * Unlock the interlock acquired by hammer_rel_interlock().
 *
 * If orig_locked is non-zero the interlock was originally held prior to
 * the hammer_rel_interlock() call and passed through to us.  In this
 * case we want to retain the CHECK error state if not transitioning
 * to 0.
 *
 * The code is the same either way so we do not have to conditionalize
 * on orig_locked.
 */
void
hammer_rel_interlock_done(struct hammer_lock *lock, int orig_locked __unused)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);
		if ((lv & ~HAMMER_REFS_FLAGS) == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			break;
		}
	}
}
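
/*
 * Teardown sketch (illustrative): the thread that wins the 1->0
 * transition holds the interlock and can safely disassociate backing
 * objects before releasing it:
 *
 *	if (hammer_rel_interlock(&node->lock, 0)) {
 *		...tear down while interlocked...
 *		hammer_rel_interlock_done(&node->lock, 0);
 *	}
 */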

/*
 * Acquire the interlock on lock->refs.
 *
 * Return 1 if CHECK is currently set.  Note that CHECK will not
 * be set if the reference count is 0, but can get set if this function
 * is preceded by, say, hammer_ref(), or through races with other
 * threads.  The return value allows the caller to use the same logic
 * as hammer_ref_interlock().
 *
 * MPSAFE
 */
int
hammer_get_interlock(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv & HAMMER_REFS_LOCKED) {
			nlv = lv | HAMMER_REFS_WANTED;
			tsleep_interlock(&lock->refs, 0);
			if (atomic_cmpset_int(&lock->refs, lv, nlv))
				tsleep(&lock->refs, PINTERLOCKED, "hilk", 0);
		} else {
			nlv = (lv | HAMMER_REFS_LOCKED);
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return((lv & HAMMER_REFS_CHECK) ? 1 : 0);
			}
		}
	}
}

/*
 * Attempt to acquire the interlock and expect 0 refs.  Used by the buffer
 * cache callback code to disassociate or lock the bufs related to HAMMER
 * structures.
 *
 * During teardown the related bp will be acquired by hammer_io_release()
 * which interlocks our test.
 *
 * Returns non-zero on success, zero on failure.
 */
int
hammer_try_interlock_norefs(struct hammer_lock *lock)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		if (lv == 0) {
			nlv = lv | HAMMER_REFS_LOCKED;
			if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
				lock->rowner = curthread;
				return(1);
			}
		} else {
			return(0);
		}
	}
	/* not reached */
}

/*
 * Release the interlock on lock->refs.  This function will set
 * CHECK if the refs is non-zero and error is non-zero, and clear
 * CHECK otherwise.
 *
 * MPSAFE
 */
void
hammer_put_interlock(struct hammer_lock *lock, int error)
{
	u_int lv;
	u_int nlv;

	for (;;) {
		lv = lock->refs;
		KKASSERT(lv & HAMMER_REFS_LOCKED);
		nlv = lv & ~(HAMMER_REFS_LOCKED | HAMMER_REFS_WANTED);

		if ((nlv & ~HAMMER_REFS_FLAGS) == 0 || error == 0)
			nlv &= ~HAMMER_REFS_CHECK;
		else
			nlv |= HAMMER_REFS_CHECK;

		if (atomic_cmpset_int(&lock->refs, lv, nlv)) {
			if (lv & HAMMER_REFS_WANTED)
				wakeup(&lock->refs);
			return;
		}
	}
}
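
/*
 * Raw interlock sketch (illustrative, revalidate() is a hypothetical
 * helper): hammer_get_interlock() reports whether CHECK was set so the
 * caller can reuse the hammer_ref_interlock() logic, and the error
 * passed to hammer_put_interlock() decides whether CHECK is re-armed
 * for the next interlocking thread:
 *
 *	int error = 0;
 *
 *	if (hammer_get_interlock(&node->lock))
 *		error = revalidate(node);
 *	hammer_put_interlock(&node->lock, error);
 */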

/*
 * The sync_lock must be held when doing any modifying operations on
 * meta-data.  It does not have to be held when modifying non-meta-data buffers
 * (backend or frontend).
 *
 * The flusher holds the lock exclusively while all other consumers hold it
 * shared.  All modifying operations made while holding the lock are atomic
 * in that they will be made part of the same flush group.
 *
 * Due to the atomicity requirement deadlock recovery code CANNOT release the
 * sync lock, nor can we give pending exclusive sync locks priority over
 * a shared sync lock as this could lead to a 3-way deadlock.
 */
void
hammer_sync_lock_ex(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_ex(&trans->hmp->sync_lock);
}

void
hammer_sync_lock_sh(hammer_transaction_t trans)
{
	++trans->sync_lock_refs;
	hammer_lock_sh(&trans->hmp->sync_lock);
}

int
hammer_sync_lock_sh_try(hammer_transaction_t trans)
{
	int error;

	++trans->sync_lock_refs;
	if ((error = hammer_lock_sh_try(&trans->hmp->sync_lock)) != 0)
		--trans->sync_lock_refs;
	return (error);
}

void
hammer_sync_unlock(hammer_transaction_t trans)
{
	--trans->sync_lock_refs;
	hammer_unlock(&trans->hmp->sync_lock);
}
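
/*
 * Sync-lock sketch (illustrative): a frontend thread wraps related
 * meta-data modifications in a shared sync lock so they land in the
 * same flush group, while the flusher takes the lock exclusively to
 * fence off a flush group boundary:
 *
 *	hammer_sync_lock_sh(trans);
 *	...modify meta-data...
 *	hammer_sync_unlock(trans);
 */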

/*
 * Misc
 */
uint32_t
hammer_to_unix_xid(hammer_uuid_t *uuid)
{
	return(*(uint32_t *)&uuid->node[2]);
}

void
hammer_guid_to_uuid(hammer_uuid_t *uuid, uint32_t guid)
{
	bzero(uuid, sizeof(*uuid));
	*(uint32_t *)&uuid->node[2] = guid;
}

void
hammer_time_to_timespec(uint64_t xtime, struct timespec *ts)
{
	ts->tv_sec = (unsigned long)(xtime / 1000000);
	ts->tv_nsec = (unsigned int)(xtime % 1000000) * 1000L;
}

uint64_t
hammer_timespec_to_time(struct timespec *ts)
{
	uint64_t xtime;

	xtime = (unsigned)(ts->tv_nsec / 1000) +
		(unsigned long)ts->tv_sec * 1000000ULL;
	return(xtime);
}
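
/*
 * Worked example (illustrative): xtime stores microseconds, so
 * sub-microsecond precision is dropped in a round trip:
 *
 *	ts.tv_sec = 1; ts.tv_nsec = 2500;	(1s + 2500ns)
 *	xtime = hammer_timespec_to_time(&ts);	(1000002 microseconds)
 *	hammer_time_to_timespec(xtime, &ts);	(1s + 2000ns)
 */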


/*
 * Convert a HAMMER filesystem object type to a vnode type
 */
enum vtype
hammer_get_vnode_type(uint8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(VDIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(VREG);
	case HAMMER_OBJTYPE_DBFILE:
		return(VDATABASE);
	case HAMMER_OBJTYPE_FIFO:
		return(VFIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(VSOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(VCHR);
	case HAMMER_OBJTYPE_BDEV:
		return(VBLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(VLNK);
	default:
		return(VBAD);
	}
	/* not reached */
}

/*
 * Convert a HAMMER filesystem object type to a directory entry type
 */
int
hammer_get_dtype(uint8_t obj_type)
{
	switch(obj_type) {
	case HAMMER_OBJTYPE_DIRECTORY:
		return(DT_DIR);
	case HAMMER_OBJTYPE_REGFILE:
		return(DT_REG);
	case HAMMER_OBJTYPE_DBFILE:
		return(DT_DBF);
	case HAMMER_OBJTYPE_FIFO:
		return(DT_FIFO);
	case HAMMER_OBJTYPE_SOCKET:
		return(DT_SOCK);
	case HAMMER_OBJTYPE_CDEV:
		return(DT_CHR);
	case HAMMER_OBJTYPE_BDEV:
		return(DT_BLK);
	case HAMMER_OBJTYPE_SOFTLINK:
		return(DT_LNK);
	default:
		return(DT_UNKNOWN);
	}
	/* not reached */
}

/*
 * Convert a vnode type to a HAMMER filesystem object type
 */
uint8_t
hammer_get_obj_type(enum vtype vtype)
{
	switch(vtype) {
	case VDIR:
		return(HAMMER_OBJTYPE_DIRECTORY);
	case VREG:
		return(HAMMER_OBJTYPE_REGFILE);
	case VDATABASE:
		return(HAMMER_OBJTYPE_DBFILE);
	case VFIFO:
		return(HAMMER_OBJTYPE_FIFO);
	case VSOCK:
		return(HAMMER_OBJTYPE_SOCKET);
	case VCHR:
		return(HAMMER_OBJTYPE_CDEV);
	case VBLK:
		return(HAMMER_OBJTYPE_BDEV);
	case VLNK:
		return(HAMMER_OBJTYPE_SOFTLINK);
	default:
		return(HAMMER_OBJTYPE_UNKNOWN);
	}
	/* not reached */
}

/*
 * Return flags for hammer_delete_at_cursor()
 */
int
hammer_nohistory(hammer_inode_t ip)
{
	if (ip->hmp->hflags & HMNT_NOHISTORY)
		return(HAMMER_DELETE_DESTROY);
	if (ip->ino_data.uflags & (SF_NOHISTORY|UF_NOHISTORY))
		return(HAMMER_DELETE_DESTROY);
	return(0);
}

/*
 * ALGORITHM VERSION 0:
 *	Return a namekey hash.  The 64 bit namekey hash consists of a 32 bit
 *	crc in the MSB and 0 in the LSB.  The caller will use the low 32 bits
 *	to generate a unique key and will scan all entries with the same upper
 *	32 bits when issuing a lookup.
 *
 *	0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 1:
 *
 *	This algorithm breaks the filename down into separate 32-bit crcs
 *	for each filename segment separated by a special character (dot,
 *	dash, underscore, or tilde).  The CRCs are then added together.
 *	This allows temporary names, which often differ in only one segment,
 *	to remain nearby in the key space.  A full-filename 16 bit crc is
 *	also generated to deal with degenerate conditions.
 *
 *	The algorithm is designed to handle create/rename situations such
 *	that renaming a file created with an extension to a name without
 *	the extension only shifts the key space rather than randomizing it.
 *
 *	NOTE: The inode allocator cache can only match 10 bits so we do
 *	      not really have any room for a partial sorted name, and
 *	      numbers don't sort well in that situation anyway.
 *
 *	0mmmmmmmmmmmmmmm mmmmmmmmmmmmmmmm llllllllllllllll 0000000000000000
 *
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * We usually strip bit 0 (set it to 0) in order to provide a consistent
 * iteration space for collisions.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_direntry_namekey(hammer_inode_t dip, const void *name, int len,
			 uint32_t *max_iterationsp)
{
	const char *aname = name;
	int32_t crcx;
	int64_t key;
	int i;
	int j;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		/*
		 * Original algorithm
		 */
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
		if (key == 0)
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		/*
		 * Filesystem version 6 or better will create directories
		 * using the ALG1 dirhash.  This hash breaks the filename
		 * up into domains separated by special characters and
		 * hashes each domain independently.
		 *
		 * A simple sub-sort using the first character of the
		 * filename in the top 5 bits was also tried; it is
		 * currently disabled under #if 0 below.
		 */
		key = 0;

		/*
		 * m32
		 */
		crcx = 0;
		for (i = j = 0; i < len; ++i) {
			if (aname[i] == '.' ||
			    aname[i] == '-' ||
			    aname[i] == '_' ||
			    aname[i] == '~') {
				if (i != j)
					crcx += crc32(aname + j, i - j);
				j = i + 1;
			}
		}
		if (i != j)
			crcx += crc32(aname + j, i - j);

#if 0
		/*
		 * xor top 5 bits 0mmmm into low bits and steal the top 5
		 * bits as a semi sub sort using the first character of
		 * the filename.  bit 63 is always left as 0 so directory
		 * keys are positive numbers.
		 */
		crcx ^= (uint32_t)crcx >> (32 - 5);
		crcx = (crcx & 0x07FFFFFF) | ((aname[0] & 0x0F) << (32 - 5));
#endif
		crcx &= 0x7FFFFFFFU;

		key |= (uint64_t)crcx << 32;

		/*
		 * l16 - crc of entire filename
		 *
		 * This crc reduces degenerate hash collision conditions
		 */
		crcx = crc32(aname, len);
		crcx = crcx ^ (crcx << 16);
		key |= crcx & 0xFFFF0000U;

		/*
		 * Cleanup
		 */
		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			hdkprintf("0x%016jx %*.*s\n",
				(intmax_t)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
	default:
		key = 0;			/* compiler warning */
		*max_iterationsp = 1;		/* sanity */
		hpanic("bad algorithm %p", dip);
		break;
	}
	return(key);
}
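
/*
 * Illustrative example for ALG1 (layout only, crc values not computed):
 * for "foo.txt" the m32 field is crc32("foo") + crc32("txt"), so a
 * rename from "foo.tmp" to "foo" shifts the key by a single segment crc
 * rather than randomizing it, while the l16 field (a folded crc of the
 * whole name) still separates such names on collisions.
 */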

/*
 * Convert string after @@ (@@ not included) to TID.  Returns 0 on success,
 * EINVAL on failure.
 *
 * If this function fails *ispfsp, *tidp, and *localizationp will not
 * be modified.
 */
int
hammer_str_to_tid(const char *str, int *ispfsp,
		  hammer_tid_t *tidp, uint32_t *localizationp)
{
	hammer_tid_t tid;
	uint32_t localization;
	char *ptr;
	int ispfs;
	int n;

	/*
	 * Forms allowed for TID:  "0x%016llx"
	 *			   "-1"
	 */
	tid = strtouq(str, &ptr, 0);
	n = ptr - str;
	if (n == 2 && str[0] == '-' && str[1] == '1') {
		/* ok */
	} else if (n == 18 && str[0] == '0' && (str[1] | 0x20) == 'x') {
		/* ok */
	} else {
		return(EINVAL);
	}

	/*
	 * Forms allowed for PFS:  ":%05d"  (i.e. "...:0" would be illegal).
	 */
	str = ptr;
	if (*str == ':') {
		localization = pfs_to_lo(strtoul(str + 1, &ptr, 10));
		if (ptr - str != 6)
			return(EINVAL);
		str = ptr;
		ispfs = 1;
	} else {
		localization = *localizationp;
		ispfs = 0;
	}

	/*
	 * Any trailing junk invalidates special extension handling.
	 */
	if (*str)
		return(EINVAL);
	*tidp = tid;
	*localizationp = localization;
	*ispfsp = ispfs;
	return(0);
}
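
/*
 * Accepted forms (illustrative):
 *
 *	"0x00000001061a8ba0"		sets *tidp, keeps *localizationp
 *	"0x00000001061a8ba0:00001"	also selects PFS #1's localization
 *	"-1"				the maximum TID
 *
 * Anything else, including trailing junk, returns EINVAL.
 */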

/*
 * Return the block size at the specified file offset.
 */
int
hammer_blocksize(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return(HAMMER_BUFSIZE);
	else
		return(HAMMER_XBUFSIZE);
}

/*
 * Return the offset within the block containing the specified
 * file offset.
 */
int
hammer_blockoff(int64_t file_offset)
{
	if (file_offset < HAMMER_XDEMARC)
		return((int)file_offset & HAMMER_BUFMASK);
	else
		return((int)file_offset & HAMMER_XBUFMASK);
}

/*
 * Return the demarkation point between the two offsets where
 * the block size changes.
 */
int64_t
hammer_blockdemarc(int64_t file_offset1, int64_t file_offset2)
{
	if (file_offset1 < HAMMER_XDEMARC) {
		if (file_offset2 <= HAMMER_XDEMARC)
			return(file_offset2);
		return(HAMMER_XDEMARC);
	}
	hpanic("illegal range %jd %jd",
	      (intmax_t)file_offset1, (intmax_t)file_offset2);
}
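
/*
 * Sketch (illustrative): an I/O loop can use hammer_blockdemarc() to
 * clip each iteration so no request straddles the small-buffer /
 * large-buffer boundary.  Note the function panics if file_offset1 is
 * already past HAMMER_XDEMARC, so the caller guards that case:
 *
 *	while (offset < end) {
 *		int64_t clip = (offset < HAMMER_XDEMARC) ?
 *			       hammer_blockdemarc(offset, end) : end;
 *		...issue I/O for [offset, clip) with block size
 *	           hammer_blocksize(offset)...
 *		offset = clip;
 *	}
 */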

dev_t
hammer_fsid_to_udev(hammer_uuid_t *uuid)
{
	uint32_t crc;

	crc = crc32(uuid, sizeof(*uuid));
	return((dev_t)crc);
}