; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -O2 -S -mattr=sse < %s | FileCheck %s

; This file represents the nearly raw IR (mem2reg was run to make it more
; direct) for code written using x86 SSE intrinsics to compute integer
; abs/max functions.
;
; https://llvm.org/PR34047

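; For reference, a C source that could produce this IR might look like the
; sketch below. This is an illustrative reconstruction, not the verified
; code from PR34047:
;
;   #include <emmintrin.h>
;
;   // mask = (a > b); result = mask ? d : c
;   static __m128i cmpgt_i32_sel_m128i(__m128i a, __m128i b, __m128i c, __m128i d) {
;     __m128i m = _mm_cmpgt_epi32(a, b);
;     return _mm_or_si128(_mm_andnot_si128(m, c), _mm_and_si128(m, d));
;   }
;
;   __m128i abs_v4i32(__m128i x) {
;     // (x > -1) ? x : (0 - x)
;     return cmpgt_i32_sel_m128i(x, _mm_set1_epi32(-1),
;                                _mm_sub_epi32(_mm_setzero_si128(), x), x);
;   }
;
;   __m128i max_v4i32(__m128i x, __m128i y) {
;     // (x > y) ? x : y
;     return cmpgt_i32_sel_m128i(x, y, y, x);
;   }

; Vector select built from compare/mask ops: mask = (a > b), result = mask ? d : c.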
define available_externally <2 x i64> @cmpgt_i32_sel_m128i(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c, <2 x i64> %d) {
  %call = call <2 x i64> @_mm_cmpgt_epi32(<2 x i64> %a, <2 x i64> %b)
  %call1 = call <2 x i64> @_mm_andnot_si128(<2 x i64> %call, <2 x i64> %c)
  %call2 = call <2 x i64> @_mm_and_si128(<2 x i64> %call, <2 x i64> %d)
  %call3 = call <2 x i64> @_mm_or_si128(<2 x i64> %call1, <2 x i64> %call2)
  ret <2 x i64> %call3
}

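; Splat helper: broadcast __i into all four i32 lanes.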
define internal <2 x i64> @_mm_set1_epi32(i32 %__i) {
  %call = call <2 x i64> @_mm_set_epi32(i32 %__i, i32 %__i, i32 %__i, i32 %__i)
  ret <2 x i64> %call
}

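; Lane-wise i32 subtraction; the bitcasts convert to and from the generic
; <2 x i64> __m128i representation.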
define internal <2 x i64> @_mm_sub_epi32(<2 x i64> %__a, <2 x i64> %__b) {
  %t0 = bitcast <2 x i64> %__a to <4 x i32>
  %t1 = bitcast <2 x i64> %__b to <4 x i32>
  %sub = sub <4 x i32> %t0, %t1
  %t2 = bitcast <4 x i32> %sub to <2 x i64>
  ret <2 x i64> %t2
}

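; Returns the all-zeros vector.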
define internal <2 x i64> @_mm_setzero_si128() {
  ret <2 x i64> zeroinitializer
}

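; Lane-wise signed i32 compare; true lanes are sign-extended to all-ones so
; the result can be used as a bitmask.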
define internal <2 x i64> @_mm_cmpgt_epi32(<2 x i64> %__a, <2 x i64> %__b) {
  %t0 = bitcast <2 x i64> %__a to <4 x i32>
  %t1 = bitcast <2 x i64> %__b to <4 x i32>
  %cmp = icmp sgt <4 x i32> %t0, %t1
  %sext = sext <4 x i1> %cmp to <4 x i32>
  %t2 = bitcast <4 x i32> %sext to <2 x i64>
  ret <2 x i64> %t2
}

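; Bitwise OR of two 128-bit vectors.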
define internal <2 x i64> @_mm_or_si128(<2 x i64> %__a, <2 x i64> %__b) {
  %or = or <2 x i64> %__a, %__b
  ret <2 x i64> %or
}

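; Bitwise AND-NOT: ~__a & __b.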
define internal <2 x i64> @_mm_andnot_si128(<2 x i64> %__a, <2 x i64> %__b) {
  %neg = xor <2 x i64> %__a, <i64 -1, i64 -1>
  %and = and <2 x i64> %neg, %__b
  ret <2 x i64> %and
}

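; Bitwise AND of two 128-bit vectors.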
define internal <2 x i64> @_mm_and_si128(<2 x i64> %__a, <2 x i64> %__b) {
  %and = and <2 x i64> %__a, %__b
  ret <2 x i64> %and
}

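; Build a <4 x i32> from scalars; following _mm_set_epi32's convention, the
; highest lane (__i3) is passed first.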
define internal <2 x i64> @_mm_set_epi32(i32 %__i3, i32 %__i2, i32 %__i1, i32 %__i0) {
  %vecinit = insertelement <4 x i32> undef, i32 %__i0, i32 0
  %vecinit1 = insertelement <4 x i32> %vecinit, i32 %__i1, i32 1
  %vecinit2 = insertelement <4 x i32> %vecinit1, i32 %__i2, i32 2
  %vecinit3 = insertelement <4 x i32> %vecinit2, i32 %__i3, i32 3
  %t0 = bitcast <4 x i32> %vecinit3 to <2 x i64>
  ret <2 x i64> %t0
}

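; After inlining, the compare/select idiom for (x > -1) ? x : (0 - x) should
; be recognized and folded to a single @llvm.abs call.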
define <2 x i64> @abs_v4i32(<2 x i64> %x) {
; CHECK-LABEL: @abs_v4i32(
; CHECK-NEXT:    [[T1_I:%.*]] = bitcast <2 x i64> [[X:%.*]] to <4 x i32>
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.abs.v4i32(<4 x i32> [[T1_I]], i1 false)
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %call = call <2 x i64> @_mm_set1_epi32(i32 -1)
  %call1 = call <2 x i64> @_mm_setzero_si128()
  %call2 = call <2 x i64> @_mm_sub_epi32(<2 x i64> %call1, <2 x i64> %x)
  %call3 = call <2 x i64> @cmpgt_i32_sel_m128i(<2 x i64> %x, <2 x i64> %call, <2 x i64> %call2, <2 x i64> %x)
  ret <2 x i64> %call3
}

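; Likewise, the (x > y) ? x : y idiom should fold to @llvm.smax.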
define <2 x i64> @max_v4i32(<2 x i64> %x, <2 x i64> %y) {
; CHECK-LABEL: @max_v4i32(
; CHECK-NEXT:    [[T0_I_I:%.*]] = bitcast <2 x i64> [[X:%.*]] to <4 x i32>
; CHECK-NEXT:    [[T1_I_I:%.*]] = bitcast <2 x i64> [[Y:%.*]] to <4 x i32>
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <4 x i32> @llvm.smax.v4i32(<4 x i32> [[T0_I_I]], <4 x i32> [[T1_I_I]])
; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <4 x i32> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %call = call <2 x i64> @cmpgt_i32_sel_m128i(<2 x i64> %x, <2 x i64> %y, <2 x i64> %y, <2 x i64> %x)
  ret <2 x i64> %call
}
