; RUN: llc < %s -mtriple=i686-pc-windows-msvc | FileCheck %s -check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-pc-windows-msvc | FileCheck %s -check-prefixes=X64,X64_MSVC
; RUN: llc < %s -mtriple=i686-w64-windows-gnu | FileCheck %s -check-prefixes=X86,X86_MINGW
; RUN: llc < %s -mtriple=x86_64-w64-windows-gnu | FileCheck %s -check-prefixes=X64,X64_MINGW
; Control Flow Guard is currently only available on Windows.

; Test that Control Flow Guard checks are correctly added when required.
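;
; In brief (details in the per-test comments below):
;  - On i686, each checked indirect call is preceded by a call through
;    ___guard_check_icall_fptr, which validates the target (passed in %ecx)
;    and returns before the original indirect call is made.
;  - On x86_64, the indirect call is instead routed through
;    __guard_dispatch_icall_fptr, which validates the target (passed in %rax)
;    and tail calls it, so only one call instruction is emitted.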


declare i32 @target_func()


; Test that Control Flow Guard checks are not added on calls with the "guard_nocf" attribute.
define i32 @func_guard_nocf() {
entry:
  %func_ptr = alloca ptr, align 8
  store ptr @target_func, ptr %func_ptr, align 8
  %0 = load ptr, ptr %func_ptr, align 8
  %1 = call i32 %0() #0
  ret i32 %1

  ; X86-LABEL: func_guard_nocf
  ; X86:         movl  $_target_func, %eax
  ; X86-NOT: __guard_check_icall_fptr
  ; X86:         calll *%eax

  ; X64-LABEL: func_guard_nocf
  ; X64:       leaq target_func(%rip), %rax
  ; X64-NOT: __guard_dispatch_icall_fptr
  ; X64:       callq *%rax
}
attributes #0 = { "guard_nocf" }
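; Note that "guard_nocf" is a call site attribute: it is attached to the call
; above via #0 rather than to the callee, so only that particular call skips
; the check.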


; Test that Control Flow Guard checks are added even at -O0.
; FIXME: Ideally these checks should be added as a single call instruction, as in the optimized case.
define i32 @func_optnone_cf() #1 {
entry:
  %func_ptr = alloca ptr, align 8
  store ptr @target_func, ptr %func_ptr, align 8
  %0 = load ptr, ptr %func_ptr, align 8
  %1 = call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: func_optnone_cf
  ; X86:         leal  _target_func, %eax
  ; X86:         movl  %eax, (%esp)
  ; X86:         movl  (%esp), %ecx
  ; X86:         calll *___guard_check_icall_fptr
  ; X86-NEXT:  calll *%ecx

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_optnone_cf
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       movq __guard_dispatch_icall_fptr(%rip), %rcx
  ; X64:       callq *%rcx
  ; X64-NOT:   callq
}
attributes #1 = { noinline optnone }
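; (optnone must always be paired with noinline, hence both attributes above.)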


; Test that Control Flow Guard checks are correctly added in optimized code (common case).
define i32 @func_cf() {
entry:
  %func_ptr = alloca ptr, align 8
  store ptr @target_func, ptr %func_ptr, align 8
  %0 = load ptr, ptr %func_ptr, align 8
  %1 = call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: func_cf
  ; X86:         movl  $_target_func, %esi
  ; X86:         movl  $_target_func, %ecx
  ; X86:         calll *___guard_check_icall_fptr
  ; X86-NEXT:  calll *%esi

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq
}


; Test that Control Flow Guard checks are correctly added on invoke instructions.
define i32 @func_cf_invoke() personality ptr @h {
entry:
  %0 = alloca i32, align 4
  %func_ptr = alloca ptr, align 8
  store ptr @target_func, ptr %func_ptr, align 8
  %1 = load ptr, ptr %func_ptr, align 8
  %2 = invoke i32 %1()
          to label %invoke.cont unwind label %lpad
invoke.cont:                                      ; preds = %entry
  ret i32 %2

lpad:                                             ; preds = %entry
  %tmp = landingpad { ptr, i32 }
          catch ptr null
  ret i32 -1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: func_cf_invoke
  ; X86:         movl  $_target_func, %esi
  ; X86:         movl  $_target_func, %ecx
  ; X86:         calll *___guard_check_icall_fptr
  ; X86_MINGW-NEXT: Ltmp0:
  ; X86-NEXT:  calll *%esi
  ; X86:       # %invoke.cont
  ; X86:       # %lpad

  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf_invoke
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq
  ; X64:       # %invoke.cont
  ; X64:       # %lpad
}

declare void @h()


; Test that Control Flow Guard preserves floating point arguments.
declare double @target_func_doubles(double, double, double, double)

define double @func_cf_doubles() {
entry:
  %func_ptr = alloca ptr, align 8
  store ptr @target_func_doubles, ptr %func_ptr, align 8
  %0 = load ptr, ptr %func_ptr, align 8
  %1 = call double %0(double 1.000000e+00, double 2.000000e+00, double 3.000000e+00, double 4.000000e+00)
  ret double %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: func_cf_doubles
  ; X86:         movl  $_target_func_doubles, %esi
  ; X86:         movl  $_target_func_doubles, %ecx
  ; X86:         calll *___guard_check_icall_fptr
  ; X86:         calll *%esi


  ; On x86_64, __guard_dispatch_icall_fptr tail calls the function, so there should be only one call instruction.
  ; X64-LABEL: func_cf_doubles
  ; X64:       leaq target_func_doubles(%rip), %rax
  ; X64_MSVC:  movsd __real@3ff0000000000000(%rip), %xmm0
  ; X64_MSVC:  movsd __real@4000000000000000(%rip), %xmm1
  ; X64_MSVC:  movsd __real@4008000000000000(%rip), %xmm2
  ; X64_MSVC:  movsd __real@4010000000000000(%rip), %xmm3
  ; X64_MINGW: movsd .LCPI4_0(%rip), %xmm0
  ; X64_MINGW: movsd .LCPI4_1(%rip), %xmm1
  ; X64_MINGW: movsd .LCPI4_2(%rip), %xmm2
  ; X64_MINGW: movsd .LCPI4_3(%rip), %xmm3
  ; X64:       callq *__guard_dispatch_icall_fptr(%rip)
  ; X64-NOT:   callq

}


; Test that Control Flow Guard checks are correctly added for tail calls.
define i32 @func_cf_tail() {
entry:
  %func_ptr = alloca ptr, align 8
  store ptr @target_func, ptr %func_ptr, align 8
  %0 = load ptr, ptr %func_ptr, align 8
  %1 = musttail call i32 %0()
  ret i32 %1

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: func_cf_tail
  ; X86:         movl  $_target_func, %ecx
  ; X86:         calll *___guard_check_icall_fptr
  ; X86:         movl $_target_func, %eax
  ; X86:         jmpl *%eax                  # TAILCALL
  ; X86-NOT:   calll

  ; X64-LABEL: func_cf_tail
  ; X64:       leaq target_func(%rip), %rax
  ; X64:       rex64 jmpq *__guard_dispatch_icall_fptr(%rip)         # TAILCALL
  ; X64-NOT:   callq
}


%struct.Foo = type { ptr }

; Test that Control Flow Guard checks are correctly added for variadic musttail
; calls. These are used for MS C++ ABI virtual member pointer thunks.
; PR44049
define i32 @vmptr_thunk(ptr inreg %p) {
entry:
  %vptr = load ptr, ptr %p
  %slot = getelementptr inbounds ptr, ptr %vptr, i32 1
  %vmethod = load ptr, ptr %slot
  %rv = musttail call i32 %vmethod(ptr inreg %p)
  ret i32 %rv

  ; On i686, the call to __guard_check_icall_fptr should come immediately before the call to the target function.
  ; X86-LABEL: _vmptr_thunk:
  ; X86:       movl %eax, %esi
  ; X86:       movl (%eax), %eax
  ; X86:       movl 4(%eax), %ecx
  ; X86:       calll *___guard_check_icall_fptr
  ; X86:       movl %esi, %eax
  ; X86:       jmpl       *%ecx                  # TAILCALL
  ; X86-NOT:   calll

  ; Use NEXT here because we previously had an extra instruction in this sequence.
  ; X64-LABEL: vmptr_thunk:
  ; X64:            movq (%rcx), %rax
  ; X64-NEXT:       movq 8(%rax), %rax
  ; X64-NEXT:       movq __guard_dispatch_icall_fptr(%rip), %rdx
  ; X64-NEXT:       rex64 jmpq *%rdx            # TAILCALL
  ; X64-NOT:   callq
}

; Test that longjmp targets have public labels and are included in the .gljmp section.
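; The $cfgsj_* labels checked below mark the valid longjmp targets (the return
; address of each _setjmp call); the .symidx entries in .gljmp$y let the linker
; build the guard longjmp target table in the image load configuration.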
%struct._SETJMP_FLOAT128 = type { [2 x i64] }
@buf1 = internal global [16 x %struct._SETJMP_FLOAT128] zeroinitializer, align 16

define i32 @func_cf_setjmp() {
  %1 = alloca i32, align 4
  %2 = alloca i32, align 4
  store i32 0, ptr %1, align 4
  store i32 -1, ptr %2, align 4
  %3 = call ptr @llvm.frameaddress(i32 0)
  %4 = call i32 @_setjmp(ptr @buf1, ptr %3) #2

  ; X86-LABEL: func_cf_setjmp
  ; X86:       calll __setjmp
  ; X86-NEXT:  $cfgsj_func_cf_setjmp0:

  ; X64-LABEL: func_cf_setjmp
  ; X64:       callq _setjmp
  ; X64-NEXT:  $cfgsj_func_cf_setjmp0:

  %5 = call ptr @llvm.frameaddress(i32 0)
  %6 = call i32 @_setjmp(ptr @buf1, ptr %5) #2

  ; X86:       calll __setjmp
  ; X86-NEXT:  $cfgsj_func_cf_setjmp1:

  ; X64:       callq _setjmp
  ; X64-NEXT:  $cfgsj_func_cf_setjmp1:

  store i32 1, ptr %2, align 4
  %7 = load i32, ptr %2, align 4
  ret i32 %7

  ; X86:       .section .gljmp$y,"dr"
  ; X86-NEXT:  .symidx $cfgsj_func_cf_setjmp0
  ; X86-NEXT:  .symidx $cfgsj_func_cf_setjmp1

  ; X64:       .section .gljmp$y,"dr"
  ; X64-NEXT:  .symidx $cfgsj_func_cf_setjmp0
  ; X64-NEXT:  .symidx $cfgsj_func_cf_setjmp1
}

declare ptr @llvm.frameaddress(i32)

; Function Attrs: returns_twice
declare dso_local i32 @_setjmp(ptr, ptr) #2

attributes #2 = { returns_twice }

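; The "cfguard" module flag enables the CFGuard instrumentation: a value of 2
; requests checks plus CFG metadata (clang -cfguard), while a value of 1 would
; emit only the metadata without checks (clang -cfguard-no-checks).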
!llvm.module.flags = !{!0}
!0 = !{i32 2, !"cfguard", i32 2}