xref: /dflybsd-src/sys/kern/kern_mutex.c (revision 6cef7136f04e2b24a6db289e78720d6d8c60274e)
/*
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * Implement fast persistent locks based on atomic_cmpset_int() with
 * semantics similar to lockmgr locks but faster and taking up much less
 * space.  Taken from HAMMER's lock implementation.
 *
 * These are meant to complement our LWKT tokens.  Tokens are only held
 * while the thread is running.  Mutexes can be held across blocking
 * conditions.
 *
 * Most of the support is in sys/mutex[2].h.  We mostly provide backoff
 * functions here.
 */

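/*
 * Example usage (an illustrative sketch, not part of the original file;
 * "foo_mtx" is a made-up name).  Callers normally go through the inline
 * wrappers in sys/mutex[2].h; mtx_init() is assumed to come from there.
 * The _mtx_*() functions in this file are the blocking/backoff paths:
 *
 *	struct mtx foo_mtx;
 *
 *	mtx_init(&foo_mtx);
 *
 *	_mtx_lock_ex(&foo_mtx, "foomtx", 0, 0);	  cannot fail without PCATCH
 *	   ... exclusive section, may block/tsleep while held ...
 *	_mtx_unlock(&foo_mtx);
 */
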
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/thread.h>
#include <sys/mutex.h>

#include <machine/cpufunc.h>

#include <sys/thread2.h>
#include <sys/mutex2.h>

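/*
 * Descriptive note on mtx->mtx_lock as used below: the low MTX_MASK bits
 * hold the shared or exclusive-recursion count, while the high bits carry
 * the MTX_EXCLUSIVE, MTX_SHWANTED, MTX_EXWANTED and MTX_EXLINK flags.  The
 * exact bit assignments live in sys/mutex.h.
 */
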
static __int64_t mtx_contention_count;
static __int64_t mtx_collision_count;
static __int64_t mtx_wakeup_count;

SYSCTL_QUAD(_kern, OID_AUTO, mtx_contention_count, CTLFLAG_RW,
	    &mtx_contention_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_collision_count, CTLFLAG_RW,
	    &mtx_collision_count, 0, "");
SYSCTL_QUAD(_kern, OID_AUTO, mtx_wakeup_count, CTLFLAG_RW,
	    &mtx_wakeup_count, 0, "");

static void mtx_chain_link(mtx_t mtx);
static void mtx_delete_link(mtx_t mtx, mtx_link_t link);

/*
 * Exclusive-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 */
static __inline int
__mtx_lock_ex(mtx_t mtx, mtx_link_t link, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				error = 0;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			/*
			 * Clearing MTX_EXLINK in lock causes us to loop until
			 * MTX_EXLINK is available.  However, to avoid
			 * unnecessary cpu cache traffic we poll instead.
			 *
			 * Setting MTX_EXLINK in nlock causes us to loop until
			 * we can acquire MTX_EXLINK.
			 *
			 * Also set MTX_EXWANTED coincident with EXLINK, if
			 * not already set.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock = lock | MTX_EXWANTED | MTX_EXLINK;
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				/*
				 * Check for early abort
				 */
				if (link->state == MTX_LINK_ABORTED) {
					atomic_clear_int(&mtx->mtx_lock,
							 MTX_EXLINK);
					--td->td_critcount;
					error = ENOLCK;
					if (mtx->mtx_link == NULL) {
						atomic_clear_int(&mtx->mtx_lock,
								 MTX_EXWANTED);
					}
					break;
				}

				/*
				 * Success.  Link in our structure then
				 * release EXLINK and sleep.
				 */
				link->owner = td;
				link->state = MTX_LINK_LINKED;
				if (mtx->mtx_link) {
					link->next = mtx->mtx_link;
					link->prev = link->next->prev;
					link->next->prev = link;
					link->prev->next = link;
				} else {
					link->next = link;
					link->prev = link;
					mtx->mtx_link = link;
				}
				tsleep_interlock(link, 0);
				atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
				--td->td_critcount;

				error = tsleep(link, flags, ident, to);
				++mtx_contention_count;

				/*
				 * Normal unlink, we should own the exclusive
				 * lock now.
				 */
				if (link->state == MTX_LINK_LINKED)
					mtx_delete_link(mtx, link);
				if (link->state == MTX_LINK_ACQUIRED) {
					KKASSERT(mtx->mtx_owner == link->owner);
					error = 0;
					break;
				}

				/*
				 * Aborted lock (mtx_abort_ex called).
				 */
				if (link->state == MTX_LINK_ABORTED) {
					error = ENOLCK;
					break;
				}

				/*
				 * tsleep error, else retry.
				 */
				if (error)
					break;
			} else {
				--td->td_critcount;
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_ex_link(mtx_t mtx, mtx_link_t link,
		  const char *ident, int flags, int to)
{
	return(__mtx_lock_ex(mtx, link, ident, flags, to));
}

int
_mtx_lock_ex(mtx_t mtx, const char *ident, int flags, int to)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, flags, to));
}

int
_mtx_lock_ex_quick(mtx_t mtx, const char *ident)
{
	struct mtx_link link;

	mtx_link_init(&link);
	return(__mtx_lock_ex(mtx, &link, ident, 0, 0));
}

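/*
 * Illustrative sketch (not part of the original code; "foo_mtx" is a
 * made-up name): an interruptible, timed exclusive lock.  The flags and
 * timeout are handed straight to tsleep(), so PCATCH and an hz-based
 * timeout behave as they do there:
 *
 *	error = _mtx_lock_ex(&foo_mtx, "foomtx", PCATCH, 5 * hz);
 *	if (error == 0) {
 *		   ... exclusive critical section ...
 *		_mtx_unlock(&foo_mtx);
 *	} else {
 *		   typically EINTR/ERESTART (signal) or EWOULDBLOCK (timeout)
 *	}
 */
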
/*
 * Share-lock a mutex, block until acquired.  Recursion is allowed.
 *
 * Returns 0 on success, or the tsleep() return code on failure.
 * An error can only be returned if PCATCH is specified in the flags.
 *
 * NOTE: Shared locks get a mass-wakeup so if the tsleep fails we
 *	 do not have to chain the wakeup().
 */
static __inline int
__mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	u_int	lock;
	u_int	nlock;
	int	error;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = 0;
				break;
			}
		} else {
			nlock = lock | MTX_SHWANTED;
			tsleep_interlock(mtx, 0);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				error = tsleep(mtx, flags, ident, to);
				if (error)
					break;
				++mtx_contention_count;
				/* retry */
			} else {
				tsleep_remove(curthread);
			}
		}
		++mtx_collision_count;
	}
	return (error);
}

int
_mtx_lock_sh(mtx_t mtx, const char *ident, int flags, int to)
{
	return (__mtx_lock_sh(mtx, ident, flags, to));
}

int
_mtx_lock_sh_quick(mtx_t mtx, const char *ident)
{
	return (__mtx_lock_sh(mtx, ident, 0, 0));
}

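/*
 * Exclusive-lock a mutex, spinning with an incremental backoff instead of
 * blocking until the lock is acquired.  Recursion is allowed.
 */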
void
_mtx_spinlock_ex(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

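/*
 * Share-lock a mutex, spinning with the same incremental backoff as
 * _mtx_spinlock_ex() instead of blocking until the lock is acquired.
 */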
void
_mtx_spinlock_sh(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	bb = 1;
	int	bo;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			/* MWAIT here */
			if (bb < 1000)
				++bb;
			cpu_pause();
			for (bo = 0; bo < bb; ++bo)
				;
			++mtx_contention_count;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

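/*
 * Attempt to acquire an exclusive lock without blocking.  Recursion is
 * allowed.  Returns 0 on success, or EAGAIN if the lock is held shared
 * or is held exclusively by another thread.
 */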
int
_mtx_lock_ex_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if (lock == 0) {
			nlock = MTX_EXCLUSIVE | 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, 0, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if ((lock & MTX_EXCLUSIVE) &&
			   mtx->mtx_owner == curthread) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

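/*
 * Attempt to acquire a shared lock without blocking.  Returns 0 on
 * success, or EAGAIN if the lock is held exclusively.
 */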
int
_mtx_lock_sh_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) != MTX_MASK);
			nlock = lock + 1;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else {
			error = EAGAIN;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

/*
 * If the lock is held exclusively it must be owned by the caller.  If the
 * lock is already a shared lock this operation is a NOP.  A panic will
 * occur if the lock is not held either shared or exclusive.
 *
 * The exclusive count is converted to a shared count.
 */
void
_mtx_downgrade(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		if ((lock & MTX_EXCLUSIVE) == 0) {
			KKASSERT((lock & MTX_MASK) > 0);
			break;
		}
		KKASSERT(mtx->mtx_owner == curthread);
		nlock = lock & ~(MTX_EXCLUSIVE | MTX_SHWANTED);
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (lock & MTX_SHWANTED) {
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

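/*
 * Illustrative sketch (not part of the original code; "foo_mtx" is a
 * made-up name): a typical downgrade, taking the lock exclusively for an
 * update and then continuing with only shared access:
 *
 *	_mtx_lock_ex_quick(&foo_mtx, "foomtx");
 *	   ... modify the protected structure ...
 *	_mtx_downgrade(&foo_mtx);
 *	   ... read-only access, other shared holders may enter ...
 *	_mtx_unlock(&foo_mtx);
 */
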
/*
 * Upgrade a shared lock to an exclusive lock.  The upgrade will fail if
 * the shared lock has a count other than 1.  Optimize the most likely case
 * but note that a single cmpset can fail due to WANTED races.
 *
 * If the lock is held exclusively it must be owned by the caller and
 * this function will simply return without doing anything.  A panic will
 * occur if the lock is held exclusively by someone other than the caller.
 *
 * Returns 0 on success, EDEADLK on failure.
 */
int
_mtx_upgrade_try(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;
	int	error = 0;

	for (;;) {
		lock = mtx->mtx_lock;

		if ((lock & ~MTX_EXWANTED) == 1) {
			nlock = lock | MTX_EXCLUSIVE;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx->mtx_owner = curthread;
				break;
			}
		} else if (lock & MTX_EXCLUSIVE) {
			KKASSERT(mtx->mtx_owner == curthread);
			break;
		} else {
			error = EDEADLK;
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
	return (error);
}

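/*
 * Illustrative sketch (not part of the original code; "foo_mtx" is a
 * made-up name): a typical upgrade-with-fallback pattern.  If the try
 * fails the shared lock is still held and must be released before
 * blocking for the exclusive lock, and the caller must assume the
 * protected state changed while the lock was dropped:
 *
 *	_mtx_lock_sh_quick(&foo_mtx, "foomtx");
 *	if (_mtx_upgrade_try(&foo_mtx) == EDEADLK) {
 *		_mtx_unlock(&foo_mtx);
 *		_mtx_lock_ex_quick(&foo_mtx, "foomtx");
 *		   ... revalidate state here ...
 *	}
 *	   ... exclusive critical section ...
 *	_mtx_unlock(&foo_mtx);
 */
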
/*
 * Unlock a lock.  The caller must hold the lock either shared or exclusive.
 *
 * Any release which makes the lock available while others are waiting for
 * an exclusive lock causes us to chain ownership to the next exclusive
 * waiter instead of fully releasing the lock.
 */
void
_mtx_unlock(mtx_t mtx)
{
	u_int	lock;
	u_int	nlock;

	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~(MTX_SHWANTED | MTX_EXLINK);

		if (nlock == 1) {
			/*
			 * Last release, shared lock, no exclusive waiters.
			 */
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		} else if (nlock == (MTX_EXCLUSIVE | 1)) {
			/*
			 * Last release, exclusive lock, no exclusive waiters.
			 * Wake up any shared waiters.
			 */
			mtx->mtx_owner = NULL;
			nlock = lock & MTX_EXLINK;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				if (lock & MTX_SHWANTED) {
					wakeup(mtx);
					++mtx_wakeup_count;
				}
				break;
			}
		} else if (nlock == (MTX_EXWANTED | 1)) {
			/*
			 * Last release, shared lock, with exclusive
			 * waiters.
			 *
			 * Wait for EXLINK to clear, then acquire it.
			 * We could use the cmpset for this but polling
			 * is better on the cpu caches.
			 *
			 * Acquire an exclusive lock leaving the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK | MTX_EXCLUSIVE;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else if (nlock == (MTX_EXCLUSIVE | MTX_EXWANTED | 1)) {
			/*
			 * Last release, exclusive lock, with exclusive
			 * waiters.
			 *
			 * Leave the exclusive lock intact and the lockcount
			 * set to 1, and get EXLINK for access to mtx_link.
			 */
			thread_t td;

			if (lock & MTX_EXLINK) {
				cpu_pause();
				++mtx_collision_count;
				continue;
			}
			td = curthread;
			/*lock &= ~MTX_EXLINK;*/
			nlock |= MTX_EXLINK;
			nlock |= (lock & MTX_SHWANTED);
			++td->td_critcount;
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
				mtx_chain_link(mtx);
				--td->td_critcount;
				break;
			}
			--td->td_critcount;
		} else {
			/*
			 * Not the last release (shared or exclusive)
			 */
			nlock = lock - 1;
			KKASSERT((nlock & MTX_MASK) != MTX_MASK);
			if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
				break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Chain the lock to the next waiting exclusive lock request.  Called with
 * the lock held exclusively with a single ref count, and also with
 * MTX_EXLINK held.
 */
static void
mtx_chain_link(mtx_t mtx)
{
	mtx_link_t link;
	u_int	lock;
	u_int	nlock;
	u_int	clock;	/* bits we own and want to clear */

	/*
	 * Chain the exclusive lock to the next link.  The caller cleared
	 * SHWANTED so if there is no link we have to wake up any shared
	 * waiters.
	 */
	clock = MTX_EXLINK;
	if ((link = mtx->mtx_link) != NULL) {
		KKASSERT(link->state == MTX_LINK_LINKED);
		if (link->next == link) {
			mtx->mtx_link = NULL;
			clock |= MTX_EXWANTED;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ACQUIRED;
		mtx->mtx_owner = link->owner;
	} else {
		/*
		 * Chain was empty, release the exclusive lock's last count
		 * as well as the bits shown.
		 */
		clock |= MTX_EXCLUSIVE | MTX_EXWANTED | MTX_SHWANTED | 1;
	}

	/*
	 * We have to use cmpset here to deal with MTX_SHWANTED.  If
	 * we just clear the bits we can miss a wakeup or, worse,
	 * leave mtx_lock unlocked with MTX_SHWANTED still set.
	 */
	for (;;) {
		lock = mtx->mtx_lock;
		nlock = lock & ~clock;

		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock)) {
			if (link) {
				/*
				 * Wakeup new exclusive holder.  Leave
				 * SHWANTED intact.
				 */
				wakeup(link);
			} else if (lock & MTX_SHWANTED) {
				/*
				 * Signal any shared waiters (and we also
				 * clear SHWANTED).
				 */
				mtx->mtx_owner = NULL;
				wakeup(mtx);
				++mtx_wakeup_count;
			}
			break;
		}
		cpu_pause();
		++mtx_collision_count;
	}
}

/*
 * Delete a link structure after tsleep has failed.  This code is not
 * in the critical path as most exclusive waits are chained.
 */
static
void
mtx_delete_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK.
	 *
	 * Do not use cmpxchg to wait for EXLINK to clear as this might
	 * result in too much cpu cache traffic.
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Delete the link and release EXLINK.
	 */
	if (link->state == MTX_LINK_LINKED) {
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_IDLE;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}

/*
 * Abort a mutex locking operation, causing mtx_lock_ex_link() to
 * return ENOLCK.  This may be called at any time after the
 * mtx_link is initialized, including both before and after the call
 * to mtx_lock_ex_link().
 */
void
mtx_abort_ex_link(mtx_t mtx, mtx_link_t link)
{
	thread_t td = curthread;
	u_int	lock;
	u_int	nlock;

	/*
	 * Acquire MTX_EXLINK
	 */
	++td->td_critcount;
	for (;;) {
		lock = mtx->mtx_lock;
		if (lock & MTX_EXLINK) {
			cpu_pause();
			++mtx_collision_count;
			continue;
		}
		/* lock &= ~MTX_EXLINK; */
		nlock = lock | MTX_EXLINK;
		if (atomic_cmpset_int(&mtx->mtx_lock, lock, nlock))
			break;
		cpu_pause();
		++mtx_collision_count;
	}

	/*
	 * Do the abort
	 */
	switch(link->state) {
	case MTX_LINK_IDLE:
		/*
		 * Link not started yet
		 */
		link->state = MTX_LINK_ABORTED;
		break;
	case MTX_LINK_LINKED:
		/*
		 * de-link, mark aborted, and wakeup the thread.
		 */
		if (link->next == link) {
			mtx->mtx_link = NULL;
		} else {
			mtx->mtx_link = link->next;
			link->next->prev = link->prev;
			link->prev->next = link->next;
		}
		link->state = MTX_LINK_ABORTED;
		wakeup(link);
		break;
	case MTX_LINK_ACQUIRED:
		/*
		 * Too late, the lock was acquired.  Let it complete.
		 */
		break;
	default:
		/*
		 * link already aborted, do nothing.
		 */
		break;
	}
	atomic_clear_int(&mtx->mtx_lock, MTX_EXLINK);
	--td->td_critcount;
}
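
/*
 * Illustrative sketch (not part of the original code; "foo_mtx" is a
 * made-up name): the link/abort pattern.  A thread blocks on the lock
 * through an externally visible mtx_link so that another context can
 * cancel the wait:
 *
 *	struct mtx_link link;
 *	int error;
 *
 *	mtx_link_init(&link);
 *	error = _mtx_lock_ex_link(&foo_mtx, &link, "foomtx", 0, 0);
 *	if (error == ENOLCK) {
 *		   someone called mtx_abort_ex_link(&foo_mtx, &link)
 *	} else if (error == 0) {
 *		   ... exclusive critical section ...
 *		_mtx_unlock(&foo_mtx);
 *	}
 */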