/* $NetBSD: profile.h,v 1.21 2021/11/02 11:26:03 ryo Exp $ */

/*
 * Copyright (c) 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)profile.h	8.1 (Berkeley) 6/11/93
 */

#ifdef __x86_64__

#ifdef _KERNEL_OPT
#include "opt_xen.h"
#endif

/* The C profiling hook is declared/defined as "void _mcount(...)". */
#define	_MCOUNT_DECL void _mcount

/* Export the "_eprol" symbol marking the end of the profiled prologue. */
#define	EPROL_EXPORT	__asm(".globl _eprol")

/*
 * When building position-independent code, the call to _mcount from the
 * MCOUNT trampoline below must go through the PLT; otherwise call direct.
 */
#ifdef __PIC__
#define __MCPLT	"@PLT"
#else
#define __MCPLT
#endif
49ebc6dbadSfvdl
/*
 * MCOUNT expands to the assembly trampoline "__mcount" (with "mcount"
 * as a weak alias) that the compiler-inserted profiling call lands on.
 *
 * It preserves all integer argument registers (%rdi,%rsi,%rdx,%rcx,
 * %r8,%r9) plus %rax (struct-return/varargs count) on a 56-byte stack
 * frame, then calls the C _mcount() with:
 *   %rdi = 8(%r11) where %r11 = 0(%rbp): the return address stored in
 *          the caller's caller's frame (frompc),
 *   %rsi = 8(%rbp): the return address into the profiled function
 *          itself (selfpc),
 * and finally restores every saved register before returning, so the
 * profiled function's arguments are undisturbed.
 */
#define	MCOUNT \
__weak_alias(mcount, __mcount) \
__asm(" .globl __mcount		\n" \
"	.type __mcount,@function\n" \
"__mcount:			\n" \
"	pushq	%rbp		\n" \
"	movq	%rsp,%rbp	\n" \
"	subq	$56,%rsp	\n" \
"	movq	%rdi,0(%rsp)	\n" \
"	movq	%rsi,8(%rsp)	\n" \
"	movq	%rdx,16(%rsp)	\n" \
"	movq	%rcx,24(%rsp)	\n" \
"	movq	%r8,32(%rsp)	\n" \
"	movq	%r9,40(%rsp)	\n" \
"	movq	%rax,48(%rsp)	\n" \
"	movq	0(%rbp),%r11	\n" \
"	movq	8(%r11),%rdi	\n" \
"	movq	8(%rbp),%rsi	\n" \
"	call	_mcount"__MCPLT "	\n" \
"	movq	0(%rsp),%rdi	\n" \
"	movq	8(%rsp),%rsi	\n" \
"	movq	16(%rsp),%rdx	\n" \
"	movq	24(%rsp),%rcx	\n" \
"	movq	32(%rsp),%r8	\n" \
"	movq	40(%rsp),%r9	\n" \
"	movq	48(%rsp),%rax	\n" \
"	leave			\n" \
"	ret			\n" \
"	.size __mcount,.-__mcount");
7981918bf8Sfvdl
8081918bf8Sfvdl
#ifdef _KERNEL
#ifdef XENPV
/*
 * Disable event (interrupt) delivery for the current vCPU — the Xen PV
 * analogue of "cli".  Open-coded on purpose: the normal helpers would
 * themselves be profiled and recurse back into mcount.
 */
static inline __always_inline void
mcount_disable_intr(void)
{
	/* should be __cli() but this calls x86_lfence() which calls mcount */
	curcpu()->ci_vcpu->evtchn_upcall_mask = 1;
	__asm volatile("lfence" ::: "memory"); /* x86_lfence() */
}
90f76460d9Sbouyer
/*
 * Read the current "interrupt state": under Xen PV this is the vCPU's
 * event-channel upcall mask rather than the hardware %rflags.
 */
static inline __always_inline u_long
mcount_read_psl(void)
{
	return (curcpu()->ci_vcpu->evtchn_upcall_mask);
}
96f76460d9Sbouyer
/*
 * Restore the event-delivery mask previously obtained from
 * mcount_read_psl().  Like mcount_disable_intr(), this is open-coded
 * to avoid calling profiled helpers from within mcount itself.
 */
static inline __always_inline void
mcount_write_psl(u_long psl)
{
	curcpu()->ci_vcpu->evtchn_upcall_mask = psl;
	/* can't call x86_lfence because it calls mcount() */
	__asm volatile("lfence" ::: "memory"); /* x86_lfence() */
	/* XXX can't call hypervisor_force_callback() because we're in mcount*/
}
105f76460d9Sbouyer
#else /* XENPV */
/* Native hardware: disable maskable interrupts with "cli". */
static inline __always_inline void
mcount_disable_intr(void)
{
	__asm volatile("cli");
}
112bd63f75dSad
113*33a78575Sryo static inline __always_inline u_long
mcount_read_psl(void)114bd63f75dSad mcount_read_psl(void)
115bd63f75dSad {
116bd63f75dSad u_long ef;
117bd63f75dSad
118eedfb3ccSchs __asm volatile("pushfq; popq %0" : "=r" (ef));
119bd63f75dSad return (ef);
120bd63f75dSad }
121bd63f75dSad
122*33a78575Sryo static inline __always_inline void
mcount_write_psl(u_long ef)123bd63f75dSad mcount_write_psl(u_long ef)
124bd63f75dSad {
125eedfb3ccSchs __asm volatile("pushq %0; popfq" : : "r" (ef));
126bd63f75dSad }
127bd63f75dSad
128427af037Scherry #endif /* XENPV */
1296a0a597fSchs
/*
 * MCOUNT_ENTER/MCOUNT_EXIT bracket mcount's critical section: save the
 * current interrupt state into the caller-declared local "s" and
 * disable interrupts, then restore that state on the way out.
 */
#define	MCOUNT_ENTER	\
	do { s = (int)mcount_read_psl(); mcount_disable_intr(); } while (0)
#define	MCOUNT_EXIT	do { mcount_write_psl(s); } while (0)

#endif /* _KERNEL */
135433b5ddeSmrg
#else /* __x86_64__ */

/* Not a 64-bit build (e.g. compat/32-bit): use the i386 definitions. */
#include <i386/profile.h>

#endif /* __x86_64__ */
141