; RUN: llc -verify-machineinstrs -mtriple=x86_64-unknown -mattr=+lvi-load-hardening -mattr=+lvi-cfi -x86-experimental-lvi-inline-asm-hardening < %s -o %t.out 2> %t.err
; RUN: FileCheck %s --check-prefix=X86 < %t.out
; RUN: FileCheck %s --check-prefix=WARN < %t.err
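; The llc invocation enables LVI load hardening and LVI-CFI together with the
; experimental hardening of inline assembly; the X86 prefix checks the
; hardened output, while the WARN prefix checks diagnostics emitted for
; instructions that cannot be hardened automatically.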

; Test module-level assembly
module asm "pop %rbx"
module asm "ret"
; X86:      popq %rbx
; X86-NEXT: lfence
; X86-NEXT: shlq $0, (%rsp)
; X86-NEXT: lfence
; X86-NEXT: retq
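; The pop loads from the stack, so an lfence follows it. The ret is expanded
; into shlq $0, (%rsp) / lfence / retq, presumably so the return address is
; resolved and fenced before the branch consumes it.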

; Function Attrs: noinline nounwind optnone uwtable
define dso_local void @test_inline_asm() {
entry:
; X86-LABEL: test_inline_asm:
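; Explicit loads should each be followed by an lfence, whatever the
; addressing mode.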
18  call void asm sideeffect "mov 0x3fed(%rip),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
19; X86:      movq  16365(%rip), %rax
20; X86-NEXT: lfence
21  call void asm sideeffect "movdqa 0x0(%rip),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
22; X86:      movdqa  (%rip), %xmm0
23; X86-NEXT: lfence
24  call void asm sideeffect "movslq 0x3e5d(%rip),%rbx", "~{dirflag},~{fpsr},~{flags}"() #1
25; X86:      movslq  15965(%rip), %rbx
26; X86-NEXT: lfence
27  call void asm sideeffect "mov (%r12,%rax,8),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
28; X86:      movq  (%r12,%rax,8), %rax
29; X86-NEXT: lfence
30  call void asm sideeffect "movq (24)(%rsi), %r11", "~{dirflag},~{fpsr},~{flags}"() #1
31; X86:      movq  24(%rsi), %r11
32; X86-NEXT: lfence
33  call void asm sideeffect "cmove %r12,%rax", "~{dirflag},~{fpsr},~{flags}"() #1
34; X86:      cmoveq  %r12, %rax
35; X86-NOT:  lfence
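; Register-to-register instructions perform no load and need no lfence.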
36  call void asm sideeffect "cmove (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
37; X86:      cmoveq  (%r12), %rax
38; X86-NEXT: lfence
39  call void asm sideeffect "pop %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
40; X86:      popq  %rbx
41; X86-NEXT: lfence
42  call void asm sideeffect "popq %rbx", "~{dirflag},~{fpsr},~{flags}"() #1
43; X86:      popq  %rbx
44; X86-NEXT: lfence
45  call void asm sideeffect "xchg (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
46; X86:      xchgq %rax, (%r12)
47; X86-NEXT: lfence
48  call void asm sideeffect "cmpxchg %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
49; X86:      cmpxchgq  %r12, (%rax)
50; X86-NEXT: lfence
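; Read-modify-write instructions such as xchg and cmpxchg also load from
; memory, so they are fenced too.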
51  call void asm sideeffect "vpxor (%rcx,%rdx,1),%ymm1,%ymm0", "~{dirflag},~{fpsr},~{flags}"() #1
52; X86:      vpxor (%rcx,%rdx), %ymm1, %ymm0
53; X86-NEXT: lfence
54  call void asm sideeffect "vpmuludq 0x20(%rsi),%ymm0,%ymm12", "~{dirflag},~{fpsr},~{flags}"() #1
55; X86:      vpmuludq  32(%rsi), %ymm0, %ymm12
56; X86-NEXT: lfence
57  call void asm sideeffect "vpexpandq 0x40(%rdi),%zmm8{%k2}{z}", "~{dirflag},~{fpsr},~{flags}"() #1
58; X86:      vpexpandq 64(%rdi), %zmm8 {%k2} {z}
59; X86-NEXT: lfence
60  call void asm sideeffect "addq (%r12),%rax", "~{dirflag},~{fpsr},~{flags}"() #1
61; X86:      addq  (%r12), %rax
62; X86-NEXT: lfence
63  call void asm sideeffect "subq Lpoly+0(%rip), %rax", "~{dirflag},~{fpsr},~{flags}"() #1
64; X86:      subq  Lpoly+0(%rip), %rax
65; X86-NEXT: lfence
66  call void asm sideeffect "adcq %r12,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
67; X86:      adcq  %r12, (%rax)
68; X86-NEXT: lfence
69  call void asm sideeffect "negq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
70; X86:      negq  (%rax)
71; X86-NEXT: lfence
72  call void asm sideeffect "incq %rax", "~{dirflag},~{fpsr},~{flags}"() #1
73; X86:      incq  %rax
74; X86-NOT:  lfence
75  call void asm sideeffect "mulq (%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
76; X86:      mulq  (%rax)
77; X86-NEXT: lfence
78  call void asm sideeffect "imulq (%rax),%rdx", "~{dirflag},~{fpsr},~{flags}"() #1
79; X86:      imulq (%rax), %rdx
80; X86-NEXT: lfence
81  call void asm sideeffect "shlq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
82; X86:      shlq  (%rax)
83; X86-NEXT: lfence
84  call void asm sideeffect "shrq $$1,(%rax)", "~{dirflag},~{fpsr},~{flags}"() #1
85; X86:      shrq  (%rax)
86; X86-NEXT: lfence
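; Note that the shift-by-one forms are printed without their immediate
; operand.

; REP-prefixed string instructions load through implicit operands and cannot
; be fenced automatically; the assembler only emits a warning pointing at
; Intel's LVI guidance.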
87  call void asm sideeffect "repz cmpsb %es:(%rdi),%ds:(%rsi)", "~{dirflag},~{fpsr},~{flags}"() #1
88; WARN:      warning: Instruction may be vulnerable to LVI
89; WARN-NEXT: repz cmpsb %es:(%rdi),%ds:(%rsi)
90; WARN-NEXT: ^
91; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
92; X86:      rep cmpsb %es:(%rdi), %ds:(%rsi)
93; X86-NOT:  lfence
94  call void asm sideeffect "repnz scasb", "~{dirflag},~{fpsr},~{flags}"() #1
95; WARN:      warning: Instruction may be vulnerable to LVI
96; WARN-NEXT: repnz scasb
97; WARN-NEXT: ^
98; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
99; X86:      repne scasb %es:(%rdi), %al
100; X86-NOT:  lfence
101  call void asm sideeffect "repnz", ""() #1
102; WARN:      warning: Instruction may be vulnerable to LVI
103; WARN-NEXT: repnz
104; WARN-NEXT: ^
105; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
106  call void asm sideeffect "pinsrw $$0x6,(%eax),%xmm0", "~{dirflag},~{fpsr},~{flags}"() #1
107; X86:      pinsrw  $6, (%eax), %xmm0
108; X86-NEXT: lfence
109  call void asm sideeffect "ret", "~{dirflag},~{fpsr},~{flags}"() #1
110; X86:      shlq $0, (%rsp)
111; X86-NEXT: lfence
112; X86-NEXT: retq
113; X86-NOT:  lfence
114  call void asm sideeffect "ret $$8", "~{dirflag},~{fpsr},~{flags}"() #1
115; X86:      shlq $0, (%rsp)
116; X86-NEXT: lfence
117; X86-NEXT: retq $8
118; X86-NOT:  lfence
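; Returns written in inline asm get the same shlq $0, (%rsp) / lfence
; expansion as module-level asm, with no extra lfence after the retq.

; Indirect jumps and calls through memory fold the load into the branch, so
; no lfence can be placed between the two; the assembler only warns, and per
; Intel's guidance such code should be rewritten by hand (load, lfence, then
; branch through a register).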
119  call void asm sideeffect "jmpq *(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
120; WARN:      warning: Instruction may be vulnerable to LVI
121; WARN-NEXT: jmpq *(%rdx)
122; WARN-NEXT: ^
123; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
124; X86:      jmpq *(%rdx)
125; X86-NOT:  lfence
126  call void asm sideeffect "jmpq *0x100(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
127; WARN:      warning: Instruction may be vulnerable to LVI
128; WARN-NEXT: jmpq *0x100(%rdx)
129; WARN-NEXT: ^
130; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
131; X86:      jmpq *256(%rdx)
132; X86-NOT:  lfence
133  call void asm sideeffect "callq *200(%rdx)", "~{dirflag},~{fpsr},~{flags}"() #1
134; WARN:      warning: Instruction may be vulnerable to LVI
135; WARN-NEXT: callq *200(%rdx)
136; WARN-NEXT: ^
137; WARN-NEXT: note: See https://software.intel.com/security-software-guidance/insights/deep-dive-load-value-injection#specialinstructions for more information
138; X86:      callq *200(%rdx)
139; X86-NOT:  lfence
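; x87 loads from memory are fenced like any other load; fld %st(0) only reads
; the register stack, so it is not.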
140  call void asm sideeffect "fldt 0x8(%rbp)", "~{dirflag},~{fpsr},~{flags}"() #1
141; X86:      fldt  8(%rbp)
142; X86-NEXT: lfence
143  call void asm sideeffect "fld %st(0)", "~{dirflag},~{fpsr},~{flags}"() #1
144; X86:      fld %st(0)
145; X86-NOT:  lfence
146; Test assembler macros
147  call void asm sideeffect ".macro mplus1 x\0Aincq (\5Cx)\0A.endm\0Amplus1 %rcx", "~{dirflag},~{fpsr},~{flags}"() #1
148; X86:      incq (%rcx)
149; X86-NEXT: lfence
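; The macro body is expanded before hardening, so the load it produces is
; fenced like a directly written one.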
  ret void
}

attributes #1 = { nounwind }