xref: /dflybsd-src/sys/kern/kern_mutex.c (revision d2d1103f52e6fb116ee65a9940477c5449933f28)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * functions here.
 */
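
/*
 * Example usage (illustrative sketch only -- the inline entry points live
 * in sys/mutex2.h; the wrapper names shown here are assumed from that
 * header rather than defined in this file):
 *
 *	struct mtx m;
 *
 *	mtx_init(&m);
 *	mtx_lock_ex_quick(&m, "mymtx");	exclusive, may block, recursion ok
 *	...				the critical section may itself block
 *	mtx_unlock(&m);
 */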

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>
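
/*
 * Quick reference for the mtx_lock word as used below (the authoritative
 * bit definitions live in sys/mutex.h):
 *
 *	MTX_EXCLUSIVE	- lock is held exclusively (owner in mtx_owner)
 *	MTX_SHWANTED	- threads are sleeping for a shared lock
 *	MTX_EXWANTED	- threads are queued on mtx_link for an exclusive lock
 *	MTX_EXLINK	- interlock bit protecting the mtx_link list
 *	MTX_MASK	- mask covering the shared/exclusive hold count
 */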

static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "Number of lock contentions (waits)");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "Number of retried lock state updates");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "Number of wakeups issued to shared waiters");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags
 * or a non-zero timeout is specified.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * Clearing MTX_EXLINK in lock causes us to loop until
			 * MTX_EXLINK is available.  However, to avoid
			 * unnecessary cpu cache traffic we poll instead.
			 *
			 * Setting MTX_EXLINK in nlock causes us to loop until
			 * we can acquire MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * not already set.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--td->td_critcount;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = td;
				link->state = MTX_LINK_LINKED;
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
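				/*
				 * Interlock the sleep before dropping EXLINK
				 * so a wakeup issued between the release and
				 * the tsleep() cannot be lost.
				 */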
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--td->td_critcount;

				error = tsleep(link, flags, ident, to);
				++mtx_contention_count;

				/*
				 * Normal unlink, we should own the exclusive
				 * lock now.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * tsleep error, else retry.
				 */
				if (error)
					break;
			} else {
				--td->td_critcount;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}
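
/*
 * Illustrative only: with PCATCH in the flags the wait can be interrupted
 * by a signal, in which case the lock is not acquired.  A hypothetical
 * caller (wrapper name and arguments assumed from sys/mutex2.h):
 *
 *	error = mtx_lock_ex(&m, "mymtx", PCATCH, 0);
 *	if (error)
 *		return (error);		typically EINTR or ERESTART
 *	...
 *	mtx_unlock(&m);
 */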

/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags
 * or a non-zero timeout is specified.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
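			/*
			 * Interlock the sleep before making SHWANTED
			 * visible via the cmpset so a wakeup issued by
			 * an unlocking thread before we reach tsleep()
			 * is not lost.
			 */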
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = tsleep(mtx, flags, ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
				tsleep_remove(curthread);
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	return (__mtx_lock_sh(mtx, ident, 0, 0));
}

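/*
 * Exclusive-lock a mutex, spinning until acquired.  Never sleeps and
 * cannot fail.  Recursion is allowed.
 */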
void
_mtx_spinlock_ex(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
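			/*
			 * Capped linear backoff: spin a little longer
			 * after each failed attempt to reduce cacheline
			 * ping-ponging between cpus.
			 */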
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

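/*
 * Share-lock a mutex, spinning until acquired.  Never sleeps.  Uses the
 * same capped backoff as _mtx_spinlock_ex() while an exclusive holder
 * is present.
 */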
void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

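/*
 * Attempt to exclusive-lock a mutex without blocking.  Recursion is
 * allowed.
 *
 * Returns 0 on success, EAGAIN if the lock is held shared or is held
 * exclusively by another thread.
 */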
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

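/*
 * Attempt to share-lock a mutex without blocking.
 *
 * Returns 0 on success, EAGAIN if the lock is held exclusively.
 */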
int
_mtx_lock_sh_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}
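
/*
 * Illustrative only: a common pattern when the upgrade races and fails is
 * to drop the shared lock and reacquire it exclusively (wrapper names
 * assumed from sys/mutex2.h):
 *
 *	mtx_lock_sh_quick(&m, "mymtx");
 *	if (mtx_upgrade_try(&m) != 0) {
 *		mtx_unlock(&m);
 *		mtx_lock_ex_quick(&m, "mymtx");
 *	}
 */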

/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available when others are waiting for
 * an exclusive lock causes ownership to be chained to the next exclusive
 * waiter instead of fully releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
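		/*
		 * Mask SHWANTED and EXLINK out of the copied value so the
		 * tests below see only the hold count plus the EXCLUSIVE
		 * and EXWANTED bits; the original bits remain in 'lock'
		 * for the cmpset.
		 */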
		nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

		if (nlock == 1) {
			/*
			 * Last release, shared lock, no exclusive waiters.
			 */
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else if (nlock == (MTX_EXCLUSIVE | 1)) {
			/*
			 * Last release, exclusive lock, no exclusive waiters.
			 * Wake up any shared waiters.
			 */
			mtx->mtx_owner = NULL;
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				if (lock & MTX_SHWANTED) {
					wakeup(mtx);
					++mtx_wakeup_count;
				}
				break;
			}
		} else if (nlock == (MTX_EXWANTED | 1)) {
			/*
			 * Last release, shared lock, with exclusive
			 * waiters.
			 *
			 * Wait for EXLINK to clear, then acquire it.
			 * We could use the cmpset for this but polling
			 * is better on the cpu caches.
			 *
			 * Acquire an exclusive lock leaving the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
			/*
			 * Last release, exclusive lock, with exclusive
			 * waiters.
			 *
			 * Leave the exclusive lock intact and the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else {
			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain the lock to the next exclusive waiter.  Called with the lock held
 * exclusively with a single ref count, and also with MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
	mtx_link_t link;
	u_int	lock;
	u_int	nlock;
	u_int	clock;	/* bits we own and want to clear */

	/*
	 * Chain the exclusive lock to the next link.  The caller cleared
	 * SHWANTED so if there is no link we have to wake up any shared
	 * waiters.
	 */
	clock = MTX_EXLINK;
	if ((link = mtx->mtx_link) != NULL) {
		KKASSERT(link->state == MTX_LINK_LINKED);
		if (link->next == link) {
			mtx->mtx_link = NULL;
			clock |= MTX_EXWANTED;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ACQUIRED;
		mtx->mtx_owner = link->owner;
	} else {
		/*
		 * Chain was empty, release the exclusive lock's last count
		 * as well as the bits shown.
		 */
		clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
	}

	/*
	 * We have to use cmpset here to deal with MTX_SHWANTED.  If
	 * we just clear the bits we can miss a wakeup or, worse,
	 * leave mtx_lock unlocked with MTX_SHWANTED still set.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~clock;

		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (link) {
				/*
				 * Wakeup new exclusive holder.  Leave
				 * SHWANTED intact.
				 */
				wakeup(link);
			} else if (lock & MTX_SHWANTED) {
				/*
				 * Signal any shared waiters (and we also
				 * clear SHWANTED).
				 */
				mtx->mtx_owner = NULL;
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Do the abort
	 */
	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED:
		/*
		 * de-link, mark aborted, and wakeup the thread.
		 */
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ABORTED;
		wakeup(link);
		break;
	case MTX_LINK_ACQUIRED:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}
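
/*
 * Illustrative only: the abort interface is meant to be driven by a second
 * thread (or other context) while the first is blocked in the lock:
 *
 *	struct mtx_link mylink;			caller-supplied, per request
 *
 *	mtx_link_init(&mylink);
 *	error = _mtx_lock_ex_link(&m, &mylink, "mymtx", 0, 0);
 *	...	error is ENOLCK if some other thread meanwhile called
 *	...	mtx_abort_ex_link(&m, &mylink)
 */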