/*	$OpenBSD: _atomic_lock.c,v 1.1 2017/08/15 06:13:24 guenther Exp $	*/

/* David Leonard, <d@csee.uq.edu.au>. Public domain. */

/*
 * Atomic lock for amd64 -- taken from i386 code.
 */

#include <machine/spinlock.h>

11*7e321ac1Sguenther int
_atomic_lock(volatile _atomic_lock_t * lock)12*7e321ac1Sguenther _atomic_lock(volatile _atomic_lock_t *lock)
13*7e321ac1Sguenther {
14*7e321ac1Sguenther 	_atomic_lock_t old;
15*7e321ac1Sguenther 
16*7e321ac1Sguenther 	/*
17*7e321ac1Sguenther 	 * Use the eXCHanGe instruction to swap the lock value with
18*7e321ac1Sguenther 	 * a local variable containing the locked state.
19*7e321ac1Sguenther 	 */
20*7e321ac1Sguenther 	old = _ATOMIC_LOCK_LOCKED;
21*7e321ac1Sguenther 	__asm__("xchg %0,(%2)"
22*7e321ac1Sguenther 		: "=r" (old)
23*7e321ac1Sguenther 		: "0"  (old), "r"  (lock));
24*7e321ac1Sguenther 
25*7e321ac1Sguenther 	return (old != _ATOMIC_LOCK_UNLOCKED);
26*7e321ac1Sguenther }
27