/*	$NetBSD: profile.h,v 1.32 2007/10/17 19:54:57 garbled Exp $	*/

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef _KERNEL_OPT
#include "opt_multiprocessor.h"
#endif

#ifdef _KERNEL
#include <machine/cpufunc.h>
#include <machine/atomic.h>
#endif

#define	_MCOUNT_DECL static __inline void _mcount

#ifdef __ELF__
#define MCOUNT_ENTRY	"__mcount"
#define MCOUNT_COMPAT	__weak_alias(mcount, __mcount)
#else
#define MCOUNT_ENTRY	"mcount"
#define MCOUNT_COMPAT	/* nothing */
#endif
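
/*
 * With ELF the profiling entry point is emitted under the assembler name
 * given by MCOUNT_ENTRY ("__mcount") and the traditional name mcount is
 * kept only as a weak alias for compatibility; with a.out the entry point
 * keeps the plain "mcount" name and no alias is needed.
 */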

#define	MCOUNT \
MCOUNT_COMPAT								\
extern void mcount(void) __asm(MCOUNT_ENTRY)				\
	__attribute__((__no_instrument_function__));			\
void									\
mcount(void)								\
{									\
	int selfpc, frompcindex;					\
	int eax, ecx, edx;						\
									\
	__asm volatile("movl %%eax,%0" : "=g" (eax));			\
	__asm volatile("movl %%ecx,%0" : "=g" (ecx));			\
	__asm volatile("movl %%edx,%0" : "=g" (edx));			\
	/*								\
	 * find the return address for mcount,				\
	 * and the return address for mcount's caller.			\
	 *								\
	 * selfpc = pc pushed by mcount call				\
	 */								\
	__asm volatile("movl 4(%%ebp),%0" : "=r" (selfpc));		\
	/*								\
	 * frompcindex = pc pushed by call into self.			\
	 */								\
	__asm volatile("movl (%%ebp),%0;movl 4(%0),%0"			\
	    : "=r" (frompcindex));					\
	_mcount((u_long)frompcindex, (u_long)selfpc);			\
									\
	__asm volatile("movl %0,%%edx" : : "g" (edx));			\
	__asm volatile("movl %0,%%ecx" : : "g" (ecx));			\
	__asm volatile("movl %0,%%eax" : : "g" (eax));			\
}
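
/*
 * Illustrative sketch, not part of this header: a machine-independent
 * consumer (e.g. the shared gmon mcount code) is expected to supply the
 * _mcount() body through _MCOUNT_DECL and then instantiate the
 * register-preserving wrapper with MCOUNT.  The arc bookkeeping is elided
 * here; only the intended shape is shown.
 */
#if 0	/* example only */
_MCOUNT_DECL(u_long frompc, u_long selfpc)
	/* expands to: static __inline void _mcount(u_long frompc, u_long selfpc) */
{
	/* record the call-graph arc frompc -> selfpc in the gmon buffers */
}
MCOUNT	/* emits mcount(): saves %eax/%ecx/%edx, recovers frompc/selfpc
	 * from the %ebp frame, calls _mcount(), restores the registers */
#endif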

#ifdef _KERNEL
#ifdef MULTIPROCESSOR
__cpu_simple_lock_t __mcount_lock;

static inline void
MCOUNT_ENTER_MP(void)
{
	while (x86_atomic_testset_b(&__mcount_lock, __SIMPLELOCK_LOCKED)
	    != __SIMPLELOCK_UNLOCKED) {
		while (__mcount_lock == __SIMPLELOCK_LOCKED)
			;
	}
	__insn_barrier();
}

static inline void
MCOUNT_EXIT_MP(void)
{
	__insn_barrier();
	__mcount_lock = __SIMPLELOCK_UNLOCKED;
}
#else
#define MCOUNT_ENTER_MP()
#define MCOUNT_EXIT_MP()
#endif
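
/*
 * On MULTIPROCESSOR kernels __mcount_lock serializes the profiling code
 * across CPUs: MCOUNT_ENTER_MP() acquires it with an atomic test-and-set
 * and, while another CPU holds it, spins on plain reads before retrying
 * (test-and-test-and-set); MCOUNT_EXIT_MP() releases it after an
 * instruction barrier.  On uniprocessor kernels both macros compile away
 * to nothing.
 */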

static inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}

static inline u_long
mcount_read_psl(void)
{
	u_long	ef;

	__asm volatile("pushfl; popl %0" : "=r" (ef));
	return (ef);
}

static inline void
mcount_write_psl(u_long ef)
{
	__asm volatile("pushl %0; popfl" : : "r" (ef));
}

#define	MCOUNT_ENTER							\
	s = (int)mcount_read_psl();					\
	mcount_disable_intr();						\
	MCOUNT_ENTER_MP();

#define	MCOUNT_EXIT							\
	MCOUNT_EXIT_MP();						\
	mcount_write_psl(s);

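/*
 * Illustrative sketch, not part of this header: MCOUNT_ENTER/MCOUNT_EXIT
 * assume the surrounding machine-independent _mcount() code declares a
 * local "s" (an int here) to hold the saved flags.  They bracket the
 * statistics update so that interrupts stay off and, on MULTIPROCESSOR
 * kernels, __mcount_lock is held while the gmon buffers are touched.
 */
#if 0	/* example only */
	int s;

	MCOUNT_ENTER;		/* save eflags, cli, take __mcount_lock */
	/* ... update call-graph arc counts ... */
	MCOUNT_EXIT;		/* drop the lock, restore the saved eflags */
#endif
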
#endif /* _KERNEL */