; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7 -passes=slp-vectorizer,dce -S | FileCheck %s --check-prefixes=CHECK,SSE42
; RUN: opt < %s -mtriple=x86_64-apple-macosx10.8.0 -mcpu=corei7-avx -passes=slp-vectorizer,dce -S | FileCheck %s --check-prefixes=CHECK,AVX

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"

; int test_sext_4i8_to_4i32(int * restrict A, char * restrict B) {
;     A[0] = B[0];
;     A[1] = B[1];
;     A[2] = B[2];
;     A[3] = B[3];
; }

define i32 @test_sext_4i8_to_4i32(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-LABEL: @test_sext_4i8_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i8>, ptr [[B:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i8> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i8, ptr %B, align 1
  %conv = sext i8 %0 to i32
  store i32 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i8, ptr %B, i64 1
  %1 = load i8, ptr %arrayidx2, align 1
  %conv3 = sext i8 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 1
  store i32 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i8, ptr %B, i64 2
  %2 = load i8, ptr %arrayidx5, align 1
  %conv6 = sext i8 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 2
  store i32 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i8, ptr %B, i64 3
  %3 = load i8, ptr %arrayidx8, align 1
  %conv9 = sext i8 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, ptr %A, i64 3
  store i32 %conv9, ptr %arrayidx10, align 4
  ret i32 undef
}

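; Unsigned variant of the same pattern, using zero extension. A C reference
; analogous to the comment above (a sketch inferred from the IR below):
;
; int test_zext_4i16_to_4i32(int * restrict A, unsigned short * restrict B) {
;     A[0] = B[0];
;     A[1] = B[1];
;     A[2] = B[2];
;     A[3] = B[3];
; }
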
define i32 @test_zext_4i16_to_4i32(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; CHECK-LABEL: @test_zext_4i16_to_4i32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[B:%.*]], align 1
; CHECK-NEXT:    [[TMP2:%.*]] = zext <4 x i16> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    store <4 x i32> [[TMP2]], ptr [[A:%.*]], align 4
; CHECK-NEXT:    ret i32 undef
;
entry:
  %0 = load i16, ptr %B, align 1
  %conv = zext i16 %0 to i32
  store i32 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i16, ptr %B, i64 1
  %1 = load i16, ptr %arrayidx2, align 1
  %conv3 = zext i16 %1 to i32
  %arrayidx4 = getelementptr inbounds i32, ptr %A, i64 1
  store i32 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, ptr %B, i64 2
  %2 = load i16, ptr %arrayidx5, align 1
  %conv6 = zext i16 %2 to i32
  %arrayidx7 = getelementptr inbounds i32, ptr %A, i64 2
  store i32 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, ptr %B, i64 3
  %3 = load i16, ptr %arrayidx8, align 1
  %conv9 = zext i16 %3 to i32
  %arrayidx10 = getelementptr inbounds i32, ptr %A, i64 3
  store i32 %conv9, ptr %arrayidx10, align 4
  ret i32 undef
}

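; Widening casts to i64. A C reference analogous to the comment above
; (a sketch inferred from the IR below; "long" assumes an LP64 target):
;
; long test_sext_4i16_to_4i64(long * restrict A, short * restrict B) {
;     A[0] = B[0];
;     A[1] = B[1];
;     A[2] = B[2];
;     A[3] = B[3];
; }
;
; The checks below diverge per target: with 128-bit registers, SSE42 splits
; the work into two <2 x i64> load/sext/store halves, while AVX handles all
; four lanes as a single <4 x i64> operation.
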
define i64 @test_sext_4i16_to_4i64(ptr noalias nocapture %A, ptr noalias nocapture %B) {
; SSE42-LABEL: @test_sext_4i16_to_4i64(
; SSE42-NEXT:  entry:
; SSE42-NEXT:    [[TMP1:%.*]] = load <2 x i16>, ptr [[B:%.*]], align 1
; SSE42-NEXT:    [[TMP2:%.*]] = sext <2 x i16> [[TMP1]] to <2 x i64>
; SSE42-NEXT:    store <2 x i64> [[TMP2]], ptr [[A:%.*]], align 4
; SSE42-NEXT:    [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, ptr [[B]], i64 2
; SSE42-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 2
; SSE42-NEXT:    [[TMP5:%.*]] = load <2 x i16>, ptr [[ARRAYIDX5]], align 1
; SSE42-NEXT:    [[TMP6:%.*]] = sext <2 x i16> [[TMP5]] to <2 x i64>
; SSE42-NEXT:    store <2 x i64> [[TMP6]], ptr [[ARRAYIDX7]], align 4
; SSE42-NEXT:    ret i64 undef
;
; AVX-LABEL: @test_sext_4i16_to_4i64(
; AVX-NEXT:  entry:
; AVX-NEXT:    [[TMP1:%.*]] = load <4 x i16>, ptr [[B:%.*]], align 1
; AVX-NEXT:    [[TMP2:%.*]] = sext <4 x i16> [[TMP1]] to <4 x i64>
; AVX-NEXT:    store <4 x i64> [[TMP2]], ptr [[A:%.*]], align 4
; AVX-NEXT:    ret i64 undef
;
entry:
  %0 = load i16, ptr %B, align 1
  %conv = sext i16 %0 to i64
  store i64 %conv, ptr %A, align 4
  %arrayidx2 = getelementptr inbounds i16, ptr %B, i64 1
  %1 = load i16, ptr %arrayidx2, align 1
  %conv3 = sext i16 %1 to i64
  %arrayidx4 = getelementptr inbounds i64, ptr %A, i64 1
  store i64 %conv3, ptr %arrayidx4, align 4
  %arrayidx5 = getelementptr inbounds i16, ptr %B, i64 2
  %2 = load i16, ptr %arrayidx5, align 1
  %conv6 = sext i16 %2 to i64
  %arrayidx7 = getelementptr inbounds i64, ptr %A, i64 2
  store i64 %conv6, ptr %arrayidx7, align 4
  %arrayidx8 = getelementptr inbounds i16, ptr %B, i64 3
  %3 = load i16, ptr %arrayidx8, align 1
  %conv9 = sext i16 %3 to i64
  %arrayidx10 = getelementptr inbounds i64, ptr %A, i64 3
  store i64 %conv9, ptr %arrayidx10, align 4
  ret i64 undef
}