xref: /netbsd-src/lib/libpthread/arch/x86_64/pthread_md.h (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 /*	$NetBSD: pthread_md.h,v 1.12 2011/01/25 19:12:06 christos Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Adapted for x86_64 by fvdl@NetBSD.org
32  */
33 
34 #ifndef _LIB_PTHREAD_X86_64_MD_H
35 #define _LIB_PTHREAD_X86_64_MD_H
36 
37 #include <sys/ucontext.h>
38 
/*
 * Return the caller's current stack pointer (%rsp).
 */
static inline unsigned long
pthread__sp(void)
{
	unsigned long sp;

	__asm("movq %%rsp, %0" : "=g" (sp));
	return sp;
}
47 
48 #define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_URSP])
49 
/*
 * Set initial, sane values for registers whose values aren't just
 * "don't care": load the user data selector into the data/stack
 * segment registers, the user code selector into %cs, and the
 * default user flags into %rflags.
 * 0x23 is GSEL(GUDATA_SEL, SEL_UPL), and
 * 0x1b is GSEL(GUCODE_SEL, SEL_UPL).
 * 0x202 is PSL_USERSET.
 *
 * NOTE(review): the expansion is a comma expression that already ends
 * with ';', so invoking it with a trailing semicolon yields an extra
 * null statement — harmless in a braced block, but would break an
 * unbraced if/else; confirm call sites before changing.
 */
#define _INITCONTEXT_U_MD(ucp)						\
	(ucp)->uc_mcontext.__gregs[_REG_GS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_FS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_ES] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_DS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_CS] = 0x1b,			\
	(ucp)->uc_mcontext.__gregs[_REG_SS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_RFL] = 0x202;
65 
66 #define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
67 
/*
 * Don't need additional memory barriers: on x86-64 the lock-prefixed
 * atomic operations used below already act as full barriers.
 */
#define	PTHREAD__ATOMIC_IS_MEMBAR
70 
/*
 * Interlocked compare-and-swap of a pointer-sized word.
 *
 * Atomically: if *ptr == old, store new into *ptr.  Returns the value
 * that was in *ptr beforehand, so the swap took effect iff the return
 * value equals old.  The "lock" prefix makes the operation atomic with
 * respect to other processors.
 */
static inline void *
_atomic_cas_ptr(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *slot = ptr;
	void *prev;

	__asm __volatile ("lock; cmpxchgq %2, %1"
		: "=a" (prev), "=m" (*slot)
		: "r" (new), "m" (*slot), "0" (old));
	return prev;
}
83 
/*
 * Non-interlocked variant of _atomic_cas_ptr: same compare-and-swap,
 * but without the "lock" prefix, so it is atomic only with respect to
 * the current CPU (e.g. against interrupts), not other processors.
 * Returns the prior contents of *ptr; the swap happened iff the
 * return value equals old.
 */
static inline void *
_atomic_cas_ptr_ni(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *slot = ptr;
	void *prev;

	__asm __volatile ("cmpxchgq %2, %1"
		: "=a" (prev), "=m" (*slot)
		: "r" (new), "m" (*slot), "0" (old));
	return prev;
}
96 
97 #endif /* _LIB_PTHREAD_X86_64_MD_H */
98