; This is the loop in C++ being vectorized in this file with
; vector.reverse

;#pragma clang loop vectorize_width(4, scalable)
;  for (long int i = N - 1; i >= 0; i--)
;  {
;    if (cond[i])
;      a[i] += 1;
;  }

; The test checks that the mask is correctly created, reversed and used

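; Conceptually, each vector iteration is expected to process VF = vscale x 4
; elements while walking backwards through memory. A rough sketch of the
; vector body (illustrative pseudo-code only, not the actual IR names):
;
;   loop_mask = (cond[i] != 0.0)                     ; lanes in loop order
;   vals = reverse(masked.load(&a[i-VF+1], reverse(loop_mask)))
;   vals = vals + 1.0
;   masked.store(reverse(vals), &a[i-VF+1], reverse(loop_mask))
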
; RUN: opt -passes=loop-vectorize,dce -mtriple aarch64-linux-gnu -S \
; RUN:   -prefer-predicate-over-epilogue=scalar-epilogue < %s | FileCheck %s

target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
target triple = "aarch64-unknown-linux-gnu"

define void @vector_reverse_mask_nxv4i1(ptr %a, ptr %cond, i64 %N) #0 {
; CHECK-LABEL: vector.body:
; CHECK: %[[REVERSE6:.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %{{.*}})
; CHECK: %[[WIDEMSKLOAD:.*]] = call <vscale x 4 x double> @llvm.masked.load.nxv4f64.p0(ptr %{{.*}}, i32 8, <vscale x 4 x i1> %[[REVERSE6]], <vscale x 4 x double> poison)
; CHECK: %[[REVERSE7:.*]] = call <vscale x 4 x double> @llvm.vector.reverse.nxv4f64(<vscale x 4 x double> %[[WIDEMSKLOAD]])
; CHECK: %[[FADD:.*]] = fadd <vscale x 4 x double> %[[REVERSE7]]
; CHECK: %[[REVERSE9:.*]] = call <vscale x 4 x i1> @llvm.vector.reverse.nxv4i1(<vscale x 4 x i1> %{{.*}})
; CHECK: %[[REVERSE8:.*]] = call <vscale x 4 x double> @llvm.vector.reverse.nxv4f64(<vscale x 4 x double> %[[FADD]])
; CHECK: call void @llvm.masked.store.nxv4f64.p0(<vscale x 4 x double> %[[REVERSE8]], ptr %{{.*}}, i32 8, <vscale x 4 x i1> %[[REVERSE9]]

entry:
  %cmp7 = icmp sgt i64 %N, 0
  br i1 %cmp7, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.inc, %entry
  ret void

for.body:                                         ; preds = %for.inc, %entry
  %i.08.in = phi i64 [ %i.08, %for.inc ], [ %N, %entry ]
  %i.08 = add nsw i64 %i.08.in, -1
  %arrayidx = getelementptr inbounds double, ptr %cond, i64 %i.08
  %0 = load double, ptr %arrayidx, align 8
  %tobool = fcmp une double %0, 0.000000e+00
  br i1 %tobool, label %if.then, label %for.inc

if.then:                                          ; preds = %for.body
  %arrayidx1 = getelementptr inbounds double, ptr %a, i64 %i.08
  %1 = load double, ptr %arrayidx1, align 8
  %add = fadd double %1, 1.000000e+00
  store double %add, ptr %arrayidx1, align 8
  br label %for.inc

for.inc:                                          ; preds = %for.body, %if.then
  %cmp = icmp sgt i64 %i.08.in, 1
  br i1 %cmp, label %for.body, label %for.cond.cleanup, !llvm.loop !0
}

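; +sve enables SVE scalable vectors for the target.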
attributes #0 = {"target-cpu"="generic" "target-features"="+neon,+sve"}

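; Loop metadata corresponding to the pragma above: request vectorization with
; a scalable VF of 4.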
!0 = distinct !{!0, !1, !2, !3, !4}
!1 = !{!"llvm.loop.mustprogress"}
!2 = !{!"llvm.loop.vectorize.width", i32 4}
!3 = !{!"llvm.loop.vectorize.scalable.enable", i1 true}
!4 = !{!"llvm.loop.vectorize.enable", i1 true}