/*	$NetBSD: pthread_md.h,v 1.21 2023/05/25 14:30:02 riastradh Exp $	*/

/*-
 * Copyright (c) 2001, 2007, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nathan J. Williams, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _LIB_PTHREAD_I386_MD_H
#define _LIB_PTHREAD_I386_MD_H

#include <sys/ucontext.h>
#include <ucontext.h>

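/*
 * Read the caller's current stack pointer out of %esp.
 */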
static inline unsigned long
pthread__sp(void)
{
	unsigned long ret;
	__asm("movl %%esp, %0" : "=g" (ret));

	return ret;
}

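/* Fetch the saved user stack pointer from a ucontext. */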
#define pthread__uc_sp(ucp) ((ucp)->uc_mcontext.__gregs[_REG_UESP])

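/*
 * Initialize the machine-dependent parts of a ucontext built in
 * userland: copy the caller's current flags and segment registers
 * into the saved machine context.
 */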
static inline void
_initcontext_u_md(ucontext_t *ucp)
{
	__asm ("pushfl; popl %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_EFL]));
	__asm ("pushl %%cs; popl %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_CS]));
	__asm ("movl %%ds, %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_DS]));
	__asm ("movl %%es, %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_ES]));
	__asm ("movl %%fs, %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_FS]));
	__asm ("movl %%gs, %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_GS]));
	__asm ("movl %%ss, %0" : "=a" (ucp->uc_mcontext.__gregs[_REG_SS]));
}

#define _INITCONTEXT_U_MD(ucp) _initcontext_u_md(ucp);

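/*
 * Spin-wait hint: "rep; nop" encodes the x86 PAUSE instruction.
 */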
#define pthread__smt_pause() __asm __volatile("rep; nop" ::: "memory")
#define pthread__smt_wait() __asm __volatile("rep; nop" ::: "memory")

/*
 * The lock-prefixed atomic ops below are full memory barriers on x86,
 * so no additional barriers are needed.
 */
#define PTHREAD__ATOMIC_IS_MEMBAR

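/*
 * Atomically compare-and-swap a pointer with a lock-prefixed cmpxchg;
 * the previous contents of *ptr are returned in %eax.
 */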
static inline void *
_atomic_cas_ptr(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *cast = ptr;
	void *ret;

	__asm __volatile ("lock; cmpxchgl %2, %1"
		: "=a" (ret), "=m" (*cast)
		: "r" (new), "m" (*cast), "0" (old));

	return ret;
}

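/*
 * The same compare-and-swap without the lock prefix: atomic on the
 * issuing CPU only, not with respect to other processors.
 */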
static inline void *
_atomic_cas_ptr_ni(volatile void *ptr, void *old, void *new)
{
	volatile uintptr_t *cast = ptr;
	void *ret;

	__asm __volatile ("cmpxchgl %2, %1"
		: "=a" (ret), "=m" (*cast)
		: "r" (new), "m" (*cast), "0" (old));

	return ret;
}

#endif	/* _LIB_PTHREAD_I386_MD_H */