/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks, all of its tokens are released, then
 * reacquired when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However, the
 * caller must be sure to release such tokens in reverse order.
 */
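/*
 * Illustrative sketch (not part of this file's API surface): the basic
 * calling pattern.  my_token and my_count are hypothetical names used
 * only for this example.
 *
 *	lwkt_gettoken(&my_token);
 *	++my_count;			(serialized by my_token)
 *	lwkt_reltoken(&my_token);
 *
 * Because tokens are dropped whenever the holder blocks, any state
 * guarded only by a token must be re-validated after any call that
 * can block.
 */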
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

#ifdef INVARIANTS
static int token_debug = 0;
#endif

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING	"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define	KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

#ifdef INVARIANTS
SYSCTL_INT(_lwkt, OID_AUTO, token_debug, CTLFLAG_RW, &token_debug, 0, "");
#endif

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time; the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_UP_INITIALIZER(vmobj_token);

SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe,
	   CTLFLAG_RW, &pmap_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe,
	   CTLFLAG_RW, &dev_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe,
	   CTLFLAG_RW, &vm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe,
	   CTLFLAG_RW, &vmspace_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe,
	   CTLFLAG_RW, &kvm_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe,
	   CTLFLAG_RW, &proc_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe,
	   CTLFLAG_RW, &tty_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe,
	   CTLFLAG_RW, &vnode_token.t_flags, 0, "");
SYSCTL_INT(_lwkt, OID_AUTO, vmobj_mpsafe,
	   CTLFLAG_RW, &vmobj_token.t_flags, 0, "");
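
/*
 * Illustrative note: the knobs above allow the MPSAFE behavior of a
 * global token to be flipped at runtime, e.g. "sysctl lwkt.vm_mpsafe=1"
 * (this assumes LWKT_TOKEN_MPSAFE occupies the low bit of t_flags, which
 * the sysctls export directly).  Since the flags are copied into the
 * tokref at acquisition time, a change does not race against holders
 * that are already in-flight.
 */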

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens, as well as on a normal lwkt_gettoken()
 * stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions,
	    CTLFLAG_RW, &pmap_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions,
	    CTLFLAG_RW, &dev_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions,
	    CTLFLAG_RW, &vm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions,
	    CTLFLAG_RW, &vmspace_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions,
	    CTLFLAG_RW, &kvm_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions,
	    CTLFLAG_RW, &proc_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions,
	    CTLFLAG_RW, &tty_token.t_collisions, 0, "");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions,
	    CTLFLAG_RW, &vnode_token.t_collisions, 0, "");

/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}
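
/*
 * Illustrative example: with ptr == 0x12345678 the pool index is
 * ((0x12345678 >> 2) ^ (0x12345678 >> 12)) & LWKT_MASK_POOL_TOKENS.
 * Distinct pointers may hash to the same pool token; such collisions
 * are harmless because a pool token only serializes, it does not
 * identify the object.  They just cause unrelated operations to
 * occasionally contend with each other.
 */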

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the MP lock.  This bypasses unnecessary calls to get_mplock()
 * and rel_mplock() on tokens which are not normally MPSAFE when the
 * thread is already holding the MP lock.
 *
 * WARNING: The inherited td_xpcount does not count here because a switch
 *	    could schedule the preempted thread and blow away the inherited
 *	    mplock.
 */
static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = tok->t_flags;
#ifdef SMP
	if (td->td_mpcount)
#endif
		ref->tr_flags |= LWKT_TOKEN_MPSAFE;
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.  We also do not do any
 * logging here.  The logging done by lwkt_gettoken() is plenty good
 * enough to get a feel for it.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td, const char **msgp, const void **addrp)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
					break;
				continue;
			}

			/*
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to.  We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.
			 */
			*msgp = tok->t_desc;
			*addrp = scan->tr_stallpc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Note that even when we own all the tokens,
 * t_ref may not match the scan for recursively held tokens or for the
 * case where a prior lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan)
			tok->t_ref = NULL;
	}
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because it 'might' have blocked).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to.  We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return(FALSE);
	}
}

/*
 * Acquire a serializing token.  This routine does not block.
 */
static __inline
int
_lwkt_trytokref(lwkt_tokref_t ref, thread_t td)
{
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
		if (try_mplock() == 0) {
			--td->td_toks_stop;
			return (FALSE);
		}
	}
	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
			rel_mplock();
		--td->td_toks_stop;
		return (FALSE);
	}
	return (TRUE);
}

/*
 * Acquire a serializing token.  This routine can block.
 */
static __inline
void
_lwkt_gettokref(lwkt_tokref_t ref, thread_t td, const void **stkframe)
{
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();
	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed, this was not a recursive token, so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		ref->tr_stallpc = stkframe[-1];
		atomic_add_long(&ref->tr_tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(ref->tr_tok->t_ref == ref);
	}
}

void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td);
	_lwkt_gettokref(ref, td, (const void **)&tok);
}

void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td);
	_lwkt_gettokref(ref, td, (const void **)&tok);
	crit_enter_hard_gd(td->td_gd);
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	tok = _lwkt_token_pool_lookup(ptr);
	_lwkt_tokref_init(ref, tok, td);
	_lwkt_gettokref(ref, td, (const void **)&ptr);
	return(tok);
}
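
/*
 * Illustrative sketch (obj is a hypothetical structure pointer):
 * serialize on an object's address without embedding a token in it.
 *
 *	lwkt_token_t tok = lwkt_getpooltoken(obj);
 *	...modify *obj...
 *	lwkt_reltoken(tok);
 *
 * Releasing via the returned token is faster than lwkt_relpooltoken(),
 * which must repeat the hash lookup.
 */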

/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td);
	return(_lwkt_trytokref(ref, td));
}
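
/*
 * Illustrative sketch (my_token is hypothetical): a non-blocking
 * attempt with a fallback path.
 *
 *	if (lwkt_trytoken(&my_token)) {
 *		...the token is held and we did not block...
 *		lwkt_reltoken(&my_token);
 *	} else {
 *		...contended; retry later or take another path...
 *	}
 */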

/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.
	 *
	 * If the token was not MPSAFE release the MP lock.
	 *
	 * NOTE: We have to do this before adjusting td_toks_stop, otherwise
	 *	 a fast interrupt can come along and reuse our ref while
	 *	 tok is still attached to it.
	 */
	if (tok->t_ref == ref)
		tok->t_ref = NULL;
	cpu_ccfence();
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0)
		rel_mplock();

	/*
	 * Finally adjust td_toks_stop; be very sure that the compiler
	 * does not reorder the clearing of tok->t_ref with the
	 * decrementing of td->td_toks_stop.
	 */
	cpu_ccfence();
	td->td_toks_stop = ref;
	KKASSERT(tok->t_ref != ref);
}
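
/*
 * Illustrative sketch of the required release order (tok_a and tok_b
 * are hypothetical):
 *
 *	lwkt_gettoken(&tok_a);
 *	lwkt_gettoken(&tok_b);
 *	...
 *	lwkt_reltoken(&tok_b);		(must be released first, LIFO)
 *	lwkt_reltoken(&tok_a);
 *
 * Releasing tok_a before tok_b would trip the KKASSERT above.
 */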

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the address.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);
	lwkt_reltoken(tok);
}

/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], 1, "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
	tok->t_collisions = 0;
	tok->t_desc = desc;
}
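
/*
 * Illustrative sketch (my_token and "mytok" are hypothetical names):
 * declaring and initializing an MPSAFE token at subsystem init time.
 *
 *	static struct lwkt_token my_token;
 *
 *	lwkt_token_init(&my_token, 1, "mytok");
 *	...
 *	lwkt_token_uninit(&my_token);
 */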

void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale.  Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
#endif
635