; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S --passes=slp-vectorizer -mtriple=riscv64-unknown-linux-gnu -mattr=+v < %s | FileCheck %s
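; The four scalar fmul/store pairs below are expected to be combined into a
; single <4 x double> multiply and store. The loads at offsets 424 and 432
; feed that multiply and alias the stored range, so the vectorized
; <2 x double> load must stay ordered before the wide store.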

define fastcc void @rephase(ptr %phases_in, ptr %157, i64 %158) {
; CHECK-LABEL: define fastcc void @rephase(
; CHECK-SAME: ptr [[PHASES_IN:%.*]], ptr [[TMP0:%.*]], i64 [[TMP1:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT:  [[ENTRY:.*:]]
; CHECK-NEXT:    [[IND_END11:%.*]] = getelementptr i8, ptr [[TMP0]], i64 [[TMP1]]
; CHECK-NEXT:    [[TMP2:%.*]] = load double, ptr [[TMP0]], align 8
; CHECK-NEXT:    [[IMAG_247:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 408
; CHECK-NEXT:    [[IMAG_1_251:%.*]] = getelementptr i8, ptr [[IND_END11]], i64 424
; CHECK-NEXT:    [[TMP3:%.*]] = load <2 x double>, ptr [[IMAG_1_251]], align 8
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <4 x double> poison, double [[TMP2]], i32 0
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <4 x double> [[TMP4]], <4 x double> poison, <4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = call <4 x double> @llvm.vector.insert.v4f64.v2f64(<4 x double> <double 0.000000e+00, double 0.000000e+00, double poison, double poison>, <2 x double> [[TMP3]], i64 2)
; CHECK-NEXT:    [[TMP7:%.*]] = fmul <4 x double> [[TMP5]], [[TMP6]]
; CHECK-NEXT:    store <4 x double> [[TMP7]], ptr [[IMAG_247]], align 8
; CHECK-NEXT:    store double [[TMP2]], ptr [[PHASES_IN]], align 8
; CHECK-NEXT:    ret void
;
entry:
  %ind.end11 = getelementptr i8, ptr %157, i64 %158
  %186 = load double, ptr %157, align 8
  %imag.247 = getelementptr i8, ptr %ind.end11, i64 408
  %mul35.248 = fmul double %186, 0.000000e+00
  store double %mul35.248, ptr %imag.247, align 8
  %arrayidx23.1.249 = getelementptr i8, ptr %ind.end11, i64 416
  %mul.1.250 = fmul double %186, 0.000000e+00
  store double %mul.1.250, ptr %arrayidx23.1.249, align 8
  %imag.1.251 = getelementptr i8, ptr %ind.end11, i64 424
  %187 = load double, ptr %imag.1.251, align 8
  %mul35.1.252 = fmul double %186, %187
  store double %mul35.1.252, ptr %imag.1.251, align 8
  %arrayidx23.2.253 = getelementptr i8, ptr %ind.end11, i64 432
  %188 = load double, ptr %arrayidx23.2.253, align 8
  %mul.2.254 = fmul double %186, %188
  store double %mul.2.254, ptr %arrayidx23.2.253, align 8
  store double %186, ptr %phases_in, align 8
  ret void
}