xref: /dflybsd-src/sys/kern/lwkt_token.c (revision b5d16701e255c342d21e69a6c80b8711c028dc65)
/*
 * Copyright (c) 2003,2004,2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * lwkt_token - Implement soft token locks.
 *
 * Tokens are locks which serialize a thread only while the thread is
 * running.  If the thread blocks, all tokens are released, then reacquired
 * when the thread resumes.
 *
 * This implementation requires no critical sections or spin locks, but
 * does use atomic_cmpset_ptr().
 *
 * Tokens may be recursively acquired by the same thread.  However the
 * caller must be sure to release such tokens in reverse order.
 */
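
/*
 * Illustrative usage sketch (a minimal, hypothetical example; "foo_token"
 * is not a token declared in this file).  A caller brackets access to
 * token-protected data with lwkt_gettoken()/lwkt_reltoken() and releases
 * nested acquisitions in reverse order:
 *
 *	lwkt_gettoken(&foo_token);
 *	lwkt_gettoken(&vm_token);
 *	... access data covered by both tokens ...
 *	lwkt_reltoken(&vm_token);
 *	lwkt_reltoken(&foo_token);
 *
 * If the thread blocks while holding the tokens, the LWKT scheduler
 * releases them and reacquires them before the thread resumes.
 */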
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/rtprio.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/ktr.h>
#include <sys/kthread.h>
#include <machine/cpu.h>
#include <sys/lock.h>
#include <sys/caps.h>
#include <sys/spinlock.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>
#include <sys/mplock2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>
#include <vm/vm_zone.h>

#include <machine/stdarg.h>
#include <machine/smp.h>

#ifndef LWKT_NUM_POOL_TOKENS
#define LWKT_NUM_POOL_TOKENS	1024	/* power of 2 */
#endif
#define LWKT_MASK_POOL_TOKENS	(LWKT_NUM_POOL_TOKENS - 1)

static lwkt_token	pool_tokens[LWKT_NUM_POOL_TOKENS];

#define TOKEN_STRING	"REF=%p TOK=%p TD=%p"
#define CONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention started)"
#define UNCONTENDED_STRING	"REF=%p TOK=%p TD=%p (contention stopped)"
#if !defined(KTR_TOKENS)
#define	KTR_TOKENS	KTR_ALL
#endif

KTR_INFO_MASTER(tokens);
KTR_INFO(KTR_TOKENS, tokens, fail, 0, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, succ, 1, TOKEN_STRING, sizeof(void *) * 3);
#if 0
KTR_INFO(KTR_TOKENS, tokens, release, 2, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, remote, 3, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqremote, 4, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, reqfail, 5, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, drain, 6, TOKEN_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_start, 7, CONTENDED_STRING, sizeof(void *) * 3);
KTR_INFO(KTR_TOKENS, tokens, contention_stop, 7, UNCONTENDED_STRING, sizeof(void *) * 3);
#endif

#define logtoken(name, ref)						\
	KTR_LOG(tokens_ ## name, ref, ref->tr_tok, curthread)

/*
 * Cpu contention mask for directed wakeups.
 */
cpumask_t cpu_contention_mask;

/*
 * Global tokens.  These replace the MP lock for major subsystem locking.
 * These tokens are initially used to lock up both global and individual
 * operations.
 *
 * Once individual structures get their own locks these tokens are used
 * only to protect global lists & other variables and to interlock
 * allocations and teardowns and such.
 *
 * The UP initializer causes token acquisition to also acquire the MP lock
 * for maximum compatibility.  The feature may be enabled and disabled at
 * any time; the MP state is copied to the tokref when the token is acquired
 * and will not race against sysctl changes.
 */
struct lwkt_token mp_token = LWKT_TOKEN_MP_INITIALIZER(mp_token);
struct lwkt_token pmap_token = LWKT_TOKEN_UP_INITIALIZER(pmap_token);
struct lwkt_token dev_token = LWKT_TOKEN_UP_INITIALIZER(dev_token);
struct lwkt_token vm_token = LWKT_TOKEN_UP_INITIALIZER(vm_token);
struct lwkt_token vmspace_token = LWKT_TOKEN_UP_INITIALIZER(vmspace_token);
struct lwkt_token kvm_token = LWKT_TOKEN_UP_INITIALIZER(kvm_token);
struct lwkt_token proc_token = LWKT_TOKEN_UP_INITIALIZER(proc_token);
struct lwkt_token tty_token = LWKT_TOKEN_UP_INITIALIZER(tty_token);
struct lwkt_token vnode_token = LWKT_TOKEN_UP_INITIALIZER(vnode_token);
struct lwkt_token vmobj_token = LWKT_TOKEN_UP_INITIALIZER(vmobj_token);

SYSCTL_INT(_lwkt, OID_AUTO, pmap_mpsafe, CTLFLAG_RW,
    &pmap_token.t_flags, 0, "Require MP lock for pmap_token");
SYSCTL_INT(_lwkt, OID_AUTO, dev_mpsafe, CTLFLAG_RW,
    &dev_token.t_flags, 0, "Require MP lock for dev_token");
SYSCTL_INT(_lwkt, OID_AUTO, vm_mpsafe, CTLFLAG_RW,
    &vm_token.t_flags, 0, "Require MP lock for vm_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmspace_mpsafe, CTLFLAG_RW,
    &vmspace_token.t_flags, 0, "Require MP lock for vmspace_token");
SYSCTL_INT(_lwkt, OID_AUTO, kvm_mpsafe, CTLFLAG_RW,
    &kvm_token.t_flags, 0, "Require MP lock for kvm_token");
SYSCTL_INT(_lwkt, OID_AUTO, proc_mpsafe, CTLFLAG_RW,
    &proc_token.t_flags, 0, "Require MP lock for proc_token");
SYSCTL_INT(_lwkt, OID_AUTO, tty_mpsafe, CTLFLAG_RW,
    &tty_token.t_flags, 0, "Require MP lock for tty_token");
SYSCTL_INT(_lwkt, OID_AUTO, vnode_mpsafe, CTLFLAG_RW,
    &vnode_token.t_flags, 0, "Require MP lock for vnode_token");
SYSCTL_INT(_lwkt, OID_AUTO, vmobj_mpsafe, CTLFLAG_RW,
    &vmobj_token.t_flags, 0, "Require MP lock for vmobj_token");

/*
 * The collision count is bumped every time the LWKT scheduler fails
 * to acquire needed tokens, in addition to being bumped on a normal
 * lwkt_gettoken() stall.
 */
SYSCTL_LONG(_lwkt, OID_AUTO, mp_collisions, CTLFLAG_RW,
    &mp_token.t_collisions, 0, "Collision counter of mp_token");
SYSCTL_LONG(_lwkt, OID_AUTO, pmap_collisions, CTLFLAG_RW,
    &pmap_token.t_collisions, 0, "Collision counter of pmap_token");
SYSCTL_LONG(_lwkt, OID_AUTO, dev_collisions, CTLFLAG_RW,
    &dev_token.t_collisions, 0, "Collision counter of dev_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vm_collisions, CTLFLAG_RW,
    &vm_token.t_collisions, 0, "Collision counter of vm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vmspace_collisions, CTLFLAG_RW,
    &vmspace_token.t_collisions, 0, "Collision counter of vmspace_token");
SYSCTL_LONG(_lwkt, OID_AUTO, kvm_collisions, CTLFLAG_RW,
    &kvm_token.t_collisions, 0, "Collision counter of kvm_token");
SYSCTL_LONG(_lwkt, OID_AUTO, proc_collisions, CTLFLAG_RW,
    &proc_token.t_collisions, 0, "Collision counter of proc_token");
SYSCTL_LONG(_lwkt, OID_AUTO, tty_collisions, CTLFLAG_RW,
    &tty_token.t_collisions, 0, "Collision counter of tty_token");
SYSCTL_LONG(_lwkt, OID_AUTO, vnode_collisions, CTLFLAG_RW,
    &vnode_token.t_collisions, 0, "Collision counter of vnode_token");

#ifdef SMP
/*
 * Acquire the initial mplock
 *
 * (low level boot only)
 */
void
cpu_get_initial_mplock(void)
{
	KKASSERT(mp_token.t_ref == NULL);
	if (lwkt_trytoken(&mp_token) == FALSE)
		panic("cpu_get_initial_mplock");
}
#endif

/*
 * Return a pool token given an address
 */
static __inline
lwkt_token_t
_lwkt_token_pool_lookup(void *ptr)
{
	int i;

	i = ((int)(intptr_t)ptr >> 2) ^ ((int)(intptr_t)ptr >> 12);
	return(&pool_tokens[i & LWKT_MASK_POOL_TOKENS]);
}

/*
 * Initialize a tokref_t prior to making it visible in the thread's
 * token array.
 *
 * As an optimization we set the MPSAFE flag if the thread is already
 * holding the mp_token.  This bypasses unnecessary calls to get_mplock() and
 * rel_mplock() on tokens which are not normally MPSAFE when the thread
 * is already holding the MP lock.
 */
static __inline
intptr_t
_lwkt_tok_flags(lwkt_token_t tok, thread_t td)
{
	intptr_t flags;

	/*
	 * tok->t_flags can change out from under us, make sure we have
	 * a local copy.
	 */
	flags = tok->t_flags;
	cpu_ccfence();
#ifdef SMP
	if ((flags & LWKT_TOKEN_MPSAFE) == 0 &&
	    _lwkt_token_held(&mp_token, td)) {
		return (flags | LWKT_TOKEN_MPSAFE);
	} else {
		return (flags);
	}
#else
	return (flags | LWKT_TOKEN_MPSAFE);
#endif
}

static __inline
void
_lwkt_tokref_init(lwkt_tokref_t ref, lwkt_token_t tok, thread_t td,
		  intptr_t flags)
{
	ref->tr_tok = tok;
	ref->tr_owner = td;
	ref->tr_flags = flags;
}

/*
 * Obtain all the tokens required by the specified thread on the current
 * cpu, return 0 on failure and non-zero on success.  If a failure occurs
 * any partially acquired tokens will be released prior to return.
 *
 * lwkt_getalltokens is called by the LWKT scheduler to acquire all
 * tokens that the thread had acquired prior to going to sleep.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.  We also do not do any
 * logging here.  The logging done by lwkt_gettoken() is plenty good
 * enough to get a feel for it.
 *
 * Called from a critical section.
 */
int
lwkt_getalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_tokref_t ref;
	lwkt_token_t tok;

	/*
	 * Acquire tokens in forward order, assign or validate tok->t_ref.
	 */
	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		for (;;) {
			/*
			 * Try to acquire the token if we do not already have
			 * it.
			 *
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			ref = tok->t_ref;
			if (ref == NULL) {
				if (atomic_cmpset_ptr(&tok->t_ref, NULL, scan))
					break;
				continue;
			}

			/*
			 * Test if ref is already recursively held by this
			 * thread.  We cannot safely dereference tok->t_ref
			 * (it might belong to another thread and is thus
			 * unstable), but we don't have to. We can simply
			 * range-check it.
			 */
			if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
				break;

			/*
			 * Otherwise we failed to acquire all the tokens.
			 * Undo and return.
			 */
			td->td_wmesg = tok->t_desc;
			atomic_add_long(&tok->t_collisions, 1);
			lwkt_relalltokens(td);
			return(FALSE);
		}
	}
	return (TRUE);
}

/*
 * Release all tokens owned by the specified thread on the current cpu.
 *
 * This code is really simple.  Even in cases where we own all the tokens,
 * note that t_ref may not match the scan for recursively held tokens,
 * or for the case where a lwkt_getalltokens() failed.
 *
 * The scheduler is responsible for maintaining the MP lock count, so
 * we don't need to deal with tr_flags here.
 *
 * Called from a critical section.
 */
void
lwkt_relalltokens(thread_t td)
{
	lwkt_tokref_t scan;
	lwkt_token_t tok;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		tok = scan->tr_tok;
		if (tok->t_ref == scan)
			tok->t_ref = NULL;
	}
}

/*
 * Token acquisition helper function.  The caller must have already
 * made nref visible by adjusting td_toks_stop and will be responsible
 * for the disposition of nref on either success or failure.
 *
 * When acquiring tokens recursively we want tok->t_ref to point to
 * the outer (first) acquisition so it gets cleared only on the last
 * release.
 */
static __inline
int
_lwkt_trytokref2(lwkt_tokref_t nref, thread_t td, int blocking)
{
	lwkt_token_t tok;
	lwkt_tokref_t ref;

	/*
	 * Make sure the compiler does not reorder prior instructions
	 * beyond this demark.
	 */
	cpu_ccfence();

	/*
	 * Attempt to gain ownership
	 */
	tok = nref->tr_tok;
	for (;;) {
		/*
		 * Try to acquire the token if we do not already have
		 * it.  This is not allowed if we are in a hard code
		 * section (because the acquisition 'might' block).
		 */
		ref = tok->t_ref;
		if (ref == NULL) {
			KASSERT((blocking == 0 ||
				td->td_gd->gd_intr_nesting_level == 0 ||
				panic_cpu_gd == mycpu),
				("Attempt to acquire token %p not already "
				 "held in hard code section", tok));

			/*
			 * NOTE: If atomic_cmpset_ptr() fails we have to
			 *	 loop and try again.  It just means we
			 *	 lost a cpu race.
			 */
			if (atomic_cmpset_ptr(&tok->t_ref, NULL, nref))
				return (TRUE);
			continue;
		}

		/*
		 * Test if ref is already recursively held by this
		 * thread.  We cannot safely dereference tok->t_ref
		 * (it might belong to another thread and is thus
		 * unstable), but we don't have to. We can simply
		 * range-check it.
		 *
		 * It is ok to acquire a token that is already held
		 * by the current thread when in a hard code section.
		 */
		if (ref >= &td->td_toks_base && ref < td->td_toks_stop)
			return(TRUE);

		/*
		 * Otherwise we failed, and it is not ok to attempt to
		 * acquire a token in a hard code section.
		 */
		KASSERT((blocking == 0 ||
			td->td_gd->gd_intr_nesting_level == 0),
			("Attempt to acquire token %p not already "
			 "held in hard code section", tok));

		return(FALSE);
	}
}

/*
 * Get a serializing token.  This routine can block.
 */
void
lwkt_gettoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_add_long(&ref->tr_tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(ref->tr_tok->t_ref == ref);
	}
}

void
lwkt_gettoken_hard(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_add_long(&ref->tr_tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(ref->tr_tok->t_ref == ref);
	}
	crit_enter_hard_gd(td->td_gd);
}

lwkt_token_t
lwkt_getpooltoken(void *ptr)
{
	thread_t td = curthread;
	lwkt_token_t tok;
	lwkt_tokref_t ref;
	intptr_t flags;

	tok = _lwkt_token_pool_lookup(ptr);
	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0)
		get_mplock();

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 1) == FALSE) {
		/*
		 * Give up running if we can't acquire the token right now.
		 *
		 * Since the tokref is already active the scheduler now
		 * takes care of acquisition, so we need only call
		 * lwkt_switch().
		 *
		 * Since we failed this was not a recursive token so upon
		 * return tr_tok->t_ref should be assigned to this specific
		 * ref.
		 */
		atomic_add_long(&ref->tr_tok->t_collisions, 1);
		logtoken(fail, ref);
		lwkt_switch();
		logtoken(succ, ref);
		KKASSERT(ref->tr_tok->t_ref == ref);
	}
	return(tok);
}

/*
 * Attempt to acquire a token, return TRUE on success, FALSE on failure.
 */
int
lwkt_trytoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;
	intptr_t flags;

	flags = _lwkt_tok_flags(tok, td);
	if ((flags & LWKT_TOKEN_MPSAFE) == 0) {
		if (try_mplock() == 0)
			return (FALSE);
	}

	ref = td->td_toks_stop;
	KKASSERT(ref < &td->td_toks_end);
	++td->td_toks_stop;
	cpu_ccfence();
	_lwkt_tokref_init(ref, tok, td, flags);

	if (_lwkt_trytokref2(ref, td, 0) == FALSE) {
		/*
		 * Cleanup, deactivate the failed token.
		 */
		if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
			cpu_ccfence();
			--td->td_toks_stop;
			cpu_ccfence();
			rel_mplock();
		} else {
			cpu_ccfence();
			--td->td_toks_stop;
		}
		return (FALSE);
	}
	return (TRUE);
}

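/*
 * Illustrative non-blocking pattern (a minimal, hypothetical sketch;
 * "foo_token" is not declared in this file).  lwkt_trytoken() either
 * acquires the token immediately or returns FALSE without blocking:
 *
 *	if (lwkt_trytoken(&foo_token)) {
 *		... fast path, token held ...
 *		lwkt_reltoken(&foo_token);
 *	} else {
 *		... fall back, e.g. defer the work or use lwkt_gettoken() ...
 *	}
 */
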
/*
 * Release a serializing token.
 *
 * WARNING!  All tokens must be released in reverse order.  This will be
 *	     asserted.
 */
void
lwkt_reltoken(lwkt_token_t tok)
{
	thread_t td = curthread;
	lwkt_tokref_t ref;

	/*
	 * Remove ref from thread token list and assert that it matches
	 * the token passed in.  Tokens must be released in reverse order.
	 */
	ref = td->td_toks_stop - 1;
	KKASSERT(ref >= &td->td_toks_base && ref->tr_tok == tok);

	/*
	 * Only clear the token if it matches ref.  If ref was a recursively
	 * acquired token it may not match.  Then adjust td_toks_stop.
	 *
	 * Some comparisons must be run prior to adjusting td_toks_stop
	 * to avoid racing against a fast interrupt / IPI which tries to
	 * acquire a token.
	 *
	 * We must also be absolutely sure that the compiler does not
	 * reorder the clearing of t_ref and the adjustment of td_toks_stop,
	 * or reorder the adjustment of td_toks_stop against the conditional.
	 *
	 * NOTE: The mplock is a token also so sequencing is a bit complex.
	 */
	if (tok->t_ref == ref)
		tok->t_ref = NULL;
	cpu_sfence();
	if ((ref->tr_flags & LWKT_TOKEN_MPSAFE) == 0) {
		cpu_ccfence();
		td->td_toks_stop = ref;
		cpu_ccfence();
		rel_mplock();
	} else {
		cpu_ccfence();
		td->td_toks_stop = ref;
		cpu_ccfence();
	}
	KKASSERT(tok->t_ref != ref);
}

void
lwkt_reltoken_hard(lwkt_token_t tok)
{
	lwkt_reltoken(tok);
	crit_exit_hard();
}

/*
 * It is faster for users of lwkt_getpooltoken() to use the returned
 * token and just call lwkt_reltoken(), but for convenience we provide
 * this function which looks the token up based on the address.
 */
void
lwkt_relpooltoken(void *ptr)
{
	lwkt_token_t tok = _lwkt_token_pool_lookup(ptr);
	lwkt_reltoken(tok);
}

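/*
 * Illustrative pool-token usage (a minimal, hypothetical sketch; "vp"
 * stands for some structure address the caller wants serialized).  The
 * faster form keeps the token returned by lwkt_getpooltoken():
 *
 *	lwkt_token_t tok = lwkt_getpooltoken(vp);
 *	... operate on the structure at vp ...
 *	lwkt_reltoken(tok);
 *
 * The convenience form looks the token up again on release:
 *
 *	lwkt_getpooltoken(vp);
 *	... operate on the structure at vp ...
 *	lwkt_relpooltoken(vp);
 */
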
/*
 * Return a count of the number of token refs the thread has to the
 * specified token, whether it currently owns the token or not.
 */
int
lwkt_cnttoken(lwkt_token_t tok, thread_t td)
{
	lwkt_tokref_t scan;
	int count = 0;

	for (scan = &td->td_toks_base; scan < td->td_toks_stop; ++scan) {
		if (scan->tr_tok == tok)
			++count;
	}
	return(count);
}


/*
 * Pool tokens are used to provide a type-stable serializing token
 * pointer that does not race against disappearing data structures.
 *
 * This routine is called in early boot just after we set up the BSP's
 * globaldata structure.
 */
void
lwkt_token_pool_init(void)
{
	int i;

	for (i = 0; i < LWKT_NUM_POOL_TOKENS; ++i)
		lwkt_token_init(&pool_tokens[i], 1, "pool");
}

lwkt_token_t
lwkt_token_pool_lookup(void *ptr)
{
	return (_lwkt_token_pool_lookup(ptr));
}

/*
 * Initialize a token.  If mpsafe is 0, the MP lock is acquired before
 * acquiring the token and released after releasing the token.
 */
void
lwkt_token_init(lwkt_token_t tok, int mpsafe, const char *desc)
{
	tok->t_ref = NULL;
	tok->t_flags = mpsafe ? LWKT_TOKEN_MPSAFE : 0;
	tok->t_collisions = 0;
	tok->t_desc = desc;
}

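/*
 * Illustrative initialization sketch (hypothetical names, not part of this
 * file).  A subsystem typically declares its own token and initializes it
 * as MPSAFE with a short descriptive string (used as the wait message when
 * the scheduler stalls on the token):
 *
 *	static struct lwkt_token foo_token;
 *
 *	lwkt_token_init(&foo_token, 1, "footk");
 *
 * Passing 0 for mpsafe instead causes the MP lock to be acquired around
 * every acquisition of the token.
 */
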
void
lwkt_token_uninit(lwkt_token_t tok)
{
	/* empty */
}

#if 0
int
lwkt_token_is_stale(lwkt_tokref_t ref)
{
	lwkt_token_t tok = ref->tr_tok;

	KKASSERT(tok->t_owner == curthread && ref->tr_state == 1 &&
		 tok->t_count > 0);

	/* Token is not stale */
	if (tok->t_lastowner == tok->t_owner)
		return (FALSE);

	/*
	 * The token is stale. Reset to not stale so that the next call to
	 * lwkt_token_is_stale will return "not stale" unless the token
	 * was acquired in-between by another thread.
	 */
	tok->t_lastowner = tok->t_owner;
	return (TRUE);
}
#endif
