; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
; RUN:   < %s | FileCheck %s
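; vp.reverse lowers to vid.v to materialize the indices 0..EVL-1, vrsub.vx
; to flip them to EVL-1..0, and a vrgather through the flipped indices. In
; the i8 tests below, the indices are built at e16 with an m2 register group
; and consumed by vrgatherei16.vv, presumably so the index arithmetic is not
; limited to 8 bits.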

define <2 x i64> @test_vp_reverse_v2i64_masked(<2 x i64> %src, <2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2i64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64> %src, <2 x i1> %mask, i32 %evl)
  ret <2 x i64> %dst
}

define <2 x i64> @test_vp_reverse_v2i64(<2 x i64> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret

  %dst = call <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64> %src, <2 x i1> splat (i1 1), i32 %evl)
  ret <2 x i64> %dst
}

define <4 x i32> @test_vp_reverse_v4i32_masked(<4 x i32> %src, <4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4i32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32> %src, <4 x i1> %mask, i32 %evl)
  ret <4 x i32> %dst
}

define <4 x i32> @test_vp_reverse_v4i32(<4 x i32> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret

  %dst = call <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32> %src, <4 x i1> splat (i1 1), i32 %evl)
  ret <4 x i32> %dst
}

define <8 x i16> @test_vp_reverse_v8i16_masked(<8 x i16> %src, <8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v8i16_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16> %src, <8 x i1> %mask, i32 %evl)
  ret <8 x i16> %dst
}

define <8 x i16> @test_vp_reverse_v8i16(<8 x i16> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret

  %dst = call <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16> %src, <8 x i1> splat (i1 1), i32 %evl)
  ret <8 x i16> %dst
}

define <16 x i8> @test_vp_reverse_v16i8_masked(<16 x i8> %src, <16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v16i8_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <16 x i8> @llvm.experimental.vp.reverse.v16i8(<16 x i8> %src, <16 x i1> %mask, i32 %evl)
  ret <16 x i8> %dst
}

define <16 x i8> @test_vp_reverse_v16i8(<16 x i8> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v10, v10, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret

  %dst = call <16 x i8> @llvm.experimental.vp.reverse.v16i8(<16 x i8> %src, <16 x i1> splat (i1 1), i32 %evl)
  ret <16 x i8> %dst
}

declare <2 x i64> @llvm.experimental.vp.reverse.v2i64(<2 x i64>,<2 x i1>,i32)
declare <4 x i32> @llvm.experimental.vp.reverse.v4i32(<4 x i32>,<4 x i1>,i32)
declare <8 x i16> @llvm.experimental.vp.reverse.v8i16(<8 x i16>,<8 x i1>,i32)
declare <16 x i8> @llvm.experimental.vp.reverse.v16i8(<16 x i8>,<16 x i1>,i32)