xref: /netbsd-src/lib/libpthread/arch/x86_64/pthread_md.h (revision 2a673dcfdbc7752f4586dab5e1ed926fd4866bbe)
1 /*	$NetBSD: pthread_md.h,v 1.13 2023/05/25 14:30:03 riastradh Exp $	*/
2 
3 /*-
4  * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Nathan J. Williams.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * Adapted for x86_64 by fvdl@NetBSD.org
32  */
33 
34 #ifndef _LIB_PTHREAD_X86_64_MD_H
35 #define _LIB_PTHREAD_X86_64_MD_H
36 
37 #include <sys/ucontext.h>
38 
/*
 * Return the caller's current stack pointer by reading %rsp directly.
 */
static inline unsigned long
pthread__sp(void)
{
	unsigned long sp;

	__asm("movq %%rsp, %0" : "=r" (sp));
	return sp;
}
47 
/*
 * Fetch the saved user stack pointer out of a ucontext: on x86_64 it
 * lives in the _REG_URSP slot of the mcontext's general-register array.
 */
#define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_URSP])

/*
 * Set initial, sane values for registers whose values aren't just
 * "don't care".
 * 0x23 is GSEL(GUDATA_SEL, SEL_UPL), and
 * 0x1b is GSEL(GUCODE_SEL, SEL_UPL).
 * 0x202 is PSL_USERSET.
 *
 * NOTE(review): the expansion is a comma expression that ends with a
 * ';', so an invocation followed by the usual ';' yields an extra
 * empty statement.  Harmless at existing call sites, but it would
 * misparse inside an unbraced if/else — confirm before reusing.
 */
#define _INITCONTEXT_U_MD(ucp)						\
	(ucp)->uc_mcontext.__gregs[_REG_GS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_FS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_ES] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_DS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_CS] = 0x1b,			\
	(ucp)->uc_mcontext.__gregs[_REG_SS] = 0x23,			\
	(ucp)->uc_mcontext.__gregs[_REG_RFL] = 0x202;

/*
 * Spin-wait hints: "rep; nop" encodes the x86 PAUSE instruction, which
 * tells the CPU we are in a busy-wait loop (saves power, helps SMT
 * siblings).  The "memory" clobber keeps the compiler from hoisting
 * loads out of the spin loop.
 */
#define	pthread__smt_pause()	__asm __volatile("rep; nop" ::: "memory")
#define	pthread__smt_wait()	__asm __volatile("rep; nop" ::: "memory")

/*
 * Don't need additional memory barriers: x86 locked read-modify-write
 * operations already act as full barriers.
 */
#define	PTHREAD__ATOMIC_IS_MEMBAR
71 
/*
 * Atomic compare-and-swap on a pointer-sized slot: if *ptr equals
 * old, store new into it; in either case return the value that was
 * in *ptr beforehand.  The "lock" prefix makes the exchange atomic
 * with respect to other CPUs.
 */
static inline void *
_atomic_cas_ptr(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *slot = ptr;
	void *prev;

	__asm __volatile ("lock; cmpxchgq %2, %1"
		: "=a" (prev), "+m" (*slot)
		: "r" (new), "0" (old));

	return prev;
}
84 
/*
 * Non-interlocked variant of _atomic_cas_ptr: same compare-and-swap,
 * but without the "lock" prefix, so it is atomic only with respect to
 * the current CPU (e.g. against interrupts/preemption), not other CPUs.
 */
static inline void *
_atomic_cas_ptr_ni(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *slot = ptr;
	void *prev;

	__asm __volatile ("cmpxchgq %2, %1"
		: "=a" (prev), "+m" (*slot)
		: "r" (new), "0" (old));

	return prev;
}
97 
98 #endif /* _LIB_PTHREAD_X86_64_MD_H */
99