xref: /llvm-project/llvm/test/Transforms/SLPVectorizer/RISCV/segmented-loads.ll (revision f9bc00e4bb6e09a9f8e4add8e3988cb44b193cdb)
15ab7b0deSAlexey Bataev; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
25ab7b0deSAlexey Bataev; RUN: opt < %s -mtriple=riscv64-unknown-linux -mattr=+v -passes=slp-vectorizer -S | FileCheck %s
35ab7b0deSAlexey Bataev
45ab7b0deSAlexey Bataev@src = common global [8 x double] zeroinitializer, align 64
55ab7b0deSAlexey Bataev@dst = common global [4 x double] zeroinitializer, align 64
65ab7b0deSAlexey Bataev
75ab7b0deSAlexey Bataevdefine void @test() {
85ab7b0deSAlexey Bataev; CHECK-LABEL: @test(
9*f9bc00e4SAlexey Bataev; CHECK-NEXT:    [[TMP4:%.*]] = load <8 x double>, ptr @src, align 8
10*f9bc00e4SAlexey Bataev; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x double> [[TMP4]], <8 x double> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
11*f9bc00e4SAlexey Bataev; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x double> [[TMP4]], <8 x double> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
125ab7b0deSAlexey Bataev; CHECK-NEXT:    [[TMP3:%.*]] = fsub fast <4 x double> [[TMP1]], [[TMP2]]
135ab7b0deSAlexey Bataev; CHECK-NEXT:    store <4 x double> [[TMP3]], ptr @dst, align 8
145ab7b0deSAlexey Bataev; CHECK-NEXT:    ret void
155ab7b0deSAlexey Bataev;
; Eight consecutive scalar loads from @src feed four even/odd fsub pairs;
; SLP should recognize the segmented (strided) access and emit one wide
; <8 x double> load plus two deinterleaving shuffles (see CHECK lines above).
165ab7b0deSAlexey Bataev  %a0 = load double, ptr @src, align 8
175ab7b0deSAlexey Bataev  %a1 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 1), align 8
185ab7b0deSAlexey Bataev  %a2 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 2), align 8
195ab7b0deSAlexey Bataev  %a3 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 3), align 8
205ab7b0deSAlexey Bataev  %a4 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 4), align 8
215ab7b0deSAlexey Bataev  %a5 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 5), align 8
225ab7b0deSAlexey Bataev  %a6 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 6), align 8
235ab7b0deSAlexey Bataev  %a7 = load double, ptr getelementptr inbounds ([8 x double], ptr @src, i32 0, i64 7), align 8
245ab7b0deSAlexey Bataev  %res1 = fsub fast double %a0, %a1
255ab7b0deSAlexey Bataev  %res2 = fsub fast double %a2, %a3
265ab7b0deSAlexey Bataev  %res3 = fsub fast double %a4, %a5
275ab7b0deSAlexey Bataev  %res4 = fsub fast double %a6, %a7
; @dst is declared as [4 x double]; use the matching array type in the
; destination GEPs. The byte offsets (8/16/24) are identical either way, so
; the vectorized output — and therefore the CHECK lines — are unaffected.
285ab7b0deSAlexey Bataev  store double %res1, ptr @dst, align 8
295ab7b0deSAlexey Bataev  store double %res2, ptr getelementptr inbounds ([4 x double], ptr @dst, i32 0, i64 1), align 8
305ab7b0deSAlexey Bataev  store double %res3, ptr getelementptr inbounds ([4 x double], ptr @dst, i32 0, i64 2), align 8
315ab7b0deSAlexey Bataev  store double %res4, ptr getelementptr inbounds ([4 x double], ptr @dst, i32 0, i64 3), align 8
325ab7b0deSAlexey Bataev  ret void
335ab7b0deSAlexey Bataev}
34
34