; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr  | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr  | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -enable-new-pm -mtriple=x86_64-unknown -stop-after=x86-isel -mattr=+sse2,+ssse3,+egpr,+avx | FileCheck %s --check-prefix=AVX

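; The REX2 prefix, which legacy instructions need in order to access the EGPRs
; (r16-r31), can only encode opcode maps 0 and 1. The checks below verify that GPR
; operands of map 2/3 and VEX-encoded instructions are constrained to the *_norex2
; register classes, while REX2-promotable instructions keep the full GPR classes,
; as MOV32rm (map 0) does in @map0.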
define i32 @map0(ptr nocapture noundef readonly %a, i64 noundef %b) {
  ; SSE-LABEL: name: map0
  ; SSE: bb.0.entry:
  ; SSE-NEXT:   liveins: $rdi, $rsi
  ; SSE-NEXT: {{  $}}
  ; SSE-NEXT:   [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
  ; SSE-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; SSE-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
  ; SSE-NEXT:   $eax = COPY [[MOV32rm]]
  ; SSE-NEXT:   RET 0, $eax
  ; AVX-LABEL: name: map0
  ; AVX: bb.0.entry:
  ; AVX-NEXT:   liveins: $rdi, $rsi
  ; AVX-NEXT: {{  $}}
  ; AVX-NEXT:   [[COPY:%[0-9]+]]:gr64_nosp = COPY $rsi
  ; AVX-NEXT:   [[COPY1:%[0-9]+]]:gr64 = COPY $rdi
  ; AVX-NEXT:   [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s32) from %ir.add.ptr)
  ; AVX-NEXT:   $eax = COPY [[MOV32rm]]
  ; AVX-NEXT:   RET 0, $eax
entry:
  %add.ptr = getelementptr inbounds i32, ptr %a, i64 %b
  %0 = load i32, ptr %add.ptr
  ret i32 %0
}

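; CVTSD2SI (legacy map 1) is REX2-promotable, so the SSE form keeps gr32; the
; VEX-encoded VCVTSD2SI cannot use REX2 and its result is constrained to gr32_norex2.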
define i32 @map1_or_vex(<2 x double> noundef %a) {
  ; SSE-LABEL: name: map1_or_vex
  ; SSE: bb.0.entry:
  ; SSE-NEXT:   liveins: $xmm0
  ; SSE-NEXT: {{  $}}
  ; SSE-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; SSE-NEXT:   [[CVTSD2SIrr_Int:%[0-9]+]]:gr32 = nofpexcept CVTSD2SIrr_Int [[COPY]], implicit $mxcsr
  ; SSE-NEXT:   $eax = COPY [[CVTSD2SIrr_Int]]
  ; SSE-NEXT:   RET 0, $eax
  ; AVX-LABEL: name: map1_or_vex
  ; AVX: bb.0.entry:
  ; AVX-NEXT:   liveins: $xmm0
  ; AVX-NEXT: {{  $}}
  ; AVX-NEXT:   [[COPY:%[0-9]+]]:vr128 = COPY $xmm0
  ; AVX-NEXT:   [[VCVTSD2SIrr_Int:%[0-9]+]]:gr32_norex2 = nofpexcept VCVTSD2SIrr_Int [[COPY]], implicit $mxcsr
  ; AVX-NEXT:   $eax = COPY [[VCVTSD2SIrr_Int]]
  ; AVX-NEXT:   RET 0, $eax
entry:
  %0 = tail call i32 @llvm.x86.sse2.cvtsd2si(<2 x double> %a)
  ret i32 %0
}

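; PABSB/VPABSB are map 2 (0F38) instructions with no REX2 form, so their address
; operands are constrained to gr64_norex2 / gr64_norex2_nosp.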
define <2 x i64> @map2_or_vex(ptr nocapture noundef readonly %b, i64 noundef %c) {
  ; SSE-LABEL: name: map2_or_vex
  ; SSE: bb.0.entry:
  ; SSE-NEXT:   liveins: $rdi, $rsi
  ; SSE-NEXT: {{  $}}
  ; SSE-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
  ; SSE-NEXT:   [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
  ; SSE-NEXT:   [[PABSBrm:%[0-9]+]]:vr128 = PABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
  ; SSE-NEXT:   $xmm0 = COPY [[PABSBrm]]
  ; SSE-NEXT:   RET 0, $xmm0
  ; AVX-LABEL: name: map2_or_vex
  ; AVX: bb.0.entry:
  ; AVX-NEXT:   liveins: $rdi, $rsi
  ; AVX-NEXT: {{  $}}
  ; AVX-NEXT:   [[COPY:%[0-9]+]]:gr64_norex2_nosp = COPY $rsi
  ; AVX-NEXT:   [[COPY1:%[0-9]+]]:gr64_norex2 = COPY $rdi
  ; AVX-NEXT:   [[VPABSBrm:%[0-9]+]]:vr128 = VPABSBrm [[COPY1]], 4, [[COPY]], 0, $noreg :: (load (s128) from %ir.add.ptr)
  ; AVX-NEXT:   $xmm0 = COPY [[VPABSBrm]]
  ; AVX-NEXT:   RET 0, $xmm0
entry:
  %add.ptr = getelementptr inbounds i32, ptr %b, i64 %c
  %a = load <2 x i64>, ptr %add.ptr
  %0 = bitcast <2 x i64> %a to <16 x i8>
  %elt.abs.i = tail call <16 x i8> @llvm.abs.v16i8(<16 x i8> %0, i1 false)
  %1 = bitcast <16 x i8> %elt.abs.i to <2 x i64>
  ret <2 x i64> %1
}

declare i32 @llvm.x86.sse2.cvtsd2si(<2 x double>)
declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1 immarg)