; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -stop-after=finalize-isel -target-abi=lp64 < %s | FileCheck %s

declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8>, ptr, i64)
declare { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i64, i64 immarg)

declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)
declare {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, <vscale x 8 x i1>, i64, i64, i64)

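; vleff with an undef passthru: the pseudo takes $noreg as its passthru
; operand, and the updated VL comes back as an explicit GPR def (copied to
; $x10), with $vl only clobbered via an implicit dead def.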
define i64 @test_vleff_nxv8i8(ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> undef, ptr %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

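; Tail-undisturbed vleff: the passthru vector from $v8 is forwarded into the
; pseudo's passthru operand.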
define i64 @test_vleff_nxv8i8_tu(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8_tu
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $v8, $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vr = COPY $v8
  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_:%[0-9]+]]:vr, [[PseudoVLE8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.nxv8i8(<vscale x 8 x i8> %passthru, ptr %p, i64 %vl)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

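; Masked vleff: the mask is moved into $v0 before the _MASK pseudo, and the
; passthru is constrained to vrnov0 so it cannot overlap the mask register.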
define i64 @test_vleff_nxv8i8_mask(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl) {
  ; CHECK-LABEL: name: test_vleff_nxv8i8_mask
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $v8, $x10, $v0, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrnov0 = COPY $v8
  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
  ; CHECK-NEXT:   [[PseudoVLE8FF_V_M1_MASK:%[0-9]+]]:vrnov0, [[PseudoVLE8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLE8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.p, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLE8FF_V_M1_MASK1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = call { <vscale x 8 x i8>, i64 } @llvm.riscv.vleff.mask.nxv8i8.i64(<vscale x 8 x i8> %maskedoff, ptr %p, <vscale x 8 x i1> %m, i64 %vl, i64 0)
  %1 = extractvalue { <vscale x 8 x i8>, i64 } %0, 1
  ret i64 %1
}

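; Segment fault-only-first load with an undef tuple passthru; the two-field
; result tuple uses the vrn2m1 register class.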
define i64 @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 $noreg, [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, i64 %vl, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}

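; Tail-undisturbed vlseg2ff: the tuple passthru from $v8_v9 is forwarded into
; the pseudo.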
define i64 @test_vlseg2ff_nxv8i8_tu(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_tu
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $v8_v9, $x10, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:vrn2m1 = COPY $v8_v9
  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_:%[0-9]+]]:vrn2m1, [[PseudoVLSEG2E8FF_V_M1_1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1 [[COPY2]], [[COPY1]], [[COPY]], 3 /* e8 */, 2 /* tu, ma */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, i64 %vl, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}

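; Masked vlseg2ff: mask in $v0, tuple passthru constrained to vrn2m1nov0.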
define i64 @test_vlseg2ff_nxv8i8_mask(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, ptr %outvl) {
  ; CHECK-LABEL: name: test_vlseg2ff_nxv8i8_mask
  ; CHECK: bb.0.entry:
  ; CHECK-NEXT:   liveins: $v8_v9, $x10, $v0, $x11
  ; CHECK-NEXT: {{  $}}
  ; CHECK-NEXT:   [[COPY:%[0-9]+]]:gprnox0 = COPY $x11
  ; CHECK-NEXT:   [[COPY1:%[0-9]+]]:vr = COPY $v0
  ; CHECK-NEXT:   [[COPY2:%[0-9]+]]:gpr = COPY $x10
  ; CHECK-NEXT:   [[COPY3:%[0-9]+]]:vrn2m1nov0 = COPY $v8_v9
  ; CHECK-NEXT:   $v0 = COPY [[COPY1]]
  ; CHECK-NEXT:   [[PseudoVLSEG2E8FF_V_M1_MASK:%[0-9]+]]:vrn2m1nov0, [[PseudoVLSEG2E8FF_V_M1_MASK1:%[0-9]+]]:gpr = PseudoVLSEG2E8FF_V_M1_MASK [[COPY3]], [[COPY2]], $v0, [[COPY]], 3 /* e8 */, 0 /* tu, mu */, implicit-def dead $vl :: (load unknown-size from %ir.base, align 1)
  ; CHECK-NEXT:   $x10 = COPY [[PseudoVLSEG2E8FF_V_M1_MASK1]]
  ; CHECK-NEXT:   PseudoRET implicit $x10
entry:
  %0 = tail call {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} @llvm.riscv.vlseg2ff.mask.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 0, i64 3)
  %1 = extractvalue {target("riscv.vector.tuple", <vscale x 8 x i8>, 2), i64} %0, 1
  ret i64 %1
}