/*	$NetBSD: profile.h,v 1.15 2008/10/26 00:08:15 mrg Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */
#ifdef __x86_64__

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#include "opt_xen.h"
#endif

#ifdef _KERNEL
#include <machine/lock.h>
#endif

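/*
 * _MCOUNT_DECL supplies the linkage and name used by the machine-
 * independent gmon code when it defines the profiling handler; the
 * (frompc, selfpc) argument list is appended there.
 */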
#define	_MCOUNT_DECL void _mcount

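/*
 * Export the _eprol label; the gmon startup code uses it to mark the
 * lower bound of the profiled text range.
 */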
#define EPROL_EXPORT	__asm(".globl _eprol")

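/*
 * In PIC code the call to _mcount must be routed through the PLT,
 * since _mcount may be defined in a shared object.
 */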
#ifdef PIC
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif

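/*
 * MCOUNT defines __mcount, the stub the compiler calls at the entry of
 * every function built with -pg.  That call is emitted after the
 * standard prologue (pushq %rbp; movq %rsp,%rbp), so once __mcount has
 * set up its own frame the two program counters _mcount() needs can be
 * recovered from the frame-pointer chain:
 *
 *	8(%rbp)		return address into the profiled function (selfpc)
 *	0(%rbp)		the profiled function's frame pointer; 8 bytes
 *			above it lies its caller's return address (frompc)
 *
 * Since __mcount runs before the profiled function's body, it must
 * preserve the argument registers %rdi, %rsi, %rdx, %rcx, %r8 and %r9,
 * plus %rax (%al carries the vector register count for varargs calls);
 * hence the 56-byte save area below.
 */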
#define	MCOUNT						\
__weak_alias(mcount, __mcount)				\
__asm(" .globl __mcount		\n"			\
"	.type __mcount,@function\n"			\
"__mcount:			\n"			\
"	pushq	%rbp		\n"			\
"	movq	%rsp,%rbp	\n"			\
"	subq	$56,%rsp	\n"			\
"	movq	%rdi,0(%rsp)	\n"			\
"	movq	%rsi,8(%rsp)	\n"			\
"	movq	%rdx,16(%rsp)	\n"			\
"	movq	%rcx,24(%rsp)	\n"			\
"	movq	%r8,32(%rsp)	\n"			\
"	movq	%r9,40(%rsp)	\n"			\
"	movq	%rax,48(%rsp)	\n"			\
"	movq	0(%rbp),%r11	\n"			\
"	movq	8(%r11),%rdi	\n"			\
"	movq	8(%rbp),%rsi	\n"			\
"	call	_mcount"__MCPLT "	\n"			\
"	movq	0(%rsp),%rdi	\n"			\
"	movq	8(%rsp),%rsi	\n"			\
"	movq	16(%rsp),%rdx	\n"			\
"	movq	24(%rsp),%rcx	\n"			\
"	movq	32(%rsp),%r8	\n"			\
"	movq	40(%rsp),%r9	\n"			\
"	movq	48(%rsp),%rax	\n"			\
"	leave			\n"			\
"	ret			\n"			\
"	.size __mcount,.-__mcount");

#ifdef _KERNEL
#ifdef MULTIPROCESSOR
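/* Spin lock serializing access to the shared profiling buffers. */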
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
	__cpu_simple_lock(&__mcount_lock);
	__insn_barrier();	/* keep the protected accesses below the acquire */
}

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();	/* keep the protected accesses above the release */
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif

#ifdef XEN
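/*
 * Under Xen the kernel is paravirtualized: the interrupt-enable state
 * lives in the per-vCPU event-channel upcall mask rather than in
 * RFLAGS, so the "psl" is read and written there.
 */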
static inline void
mcount_disable_intr(void)
{
	/* safe in mcount: __cli is a macro, so no profiled call is made */
	__cli();
}

static inline u_long
mcount_read_psl(void)
{
	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}

static inline void
mcount_write_psl(u_long psl)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = psl;
	x86_lfence();
	/* XXX can't call hypervisor_force_callback() because we're in mcount */
}

#else /* XEN */
static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	/* RFLAGS can only be read by pushing it onto the stack */
	__asm volatile("pushfq; popq %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushq %0; popfq" : : "r" (ef));
}

#endif /* XEN */
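/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket the body of the MI _mcount() in the
 * kernel: interrupts are blocked and, on MULTIPROCESSOR kernels, the
 * profiling lock is held so the gmon buffers are updated atomically.
 * The local `s' is declared by the MI code.
 */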
#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);

#endif /* _KERNEL */

#else	/*	__x86_64__	*/

#include <i386/profile.h>

#endif	/*	__x86_64__	*/