; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+f,+v -verify-machineinstrs < %s | FileCheck %s

; Both the reverse and the store use an all-ones mask and the same EVL, so the
; vp.reverse + vp.store pair is combined into a single strided store with a
; -4 byte stride, starting at the last active element.
define void @test_store_reverse_combiner(<vscale x 2 x float> %val, ptr %ptr, i32 zeroext %evl) {
; CHECK-LABEL: test_store_reverse_combiner:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    addi a0, a0, -4
; CHECK-NEXT:    li a2, -4
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a2
; CHECK-NEXT:    ret
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %rev, ptr %ptr, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret void
}

; The store mask is itself a vp.reverse, so the reversal of the mask cancels
; against the reversed store order and the original (un-reversed) mask is
; applied directly to the strided store.
define void @test_store_mask_is_vp_reverse(<vscale x 2 x float> %val, ptr %ptr, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_store_mask_is_vp_reverse:
; CHECK:       # %bb.0:
; CHECK-NEXT:    slli a2, a1, 2
; CHECK-NEXT:    add a0, a2, a0
; CHECK-NEXT:    addi a0, a0, -4
; CHECK-NEXT:    li a2, -4
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
; CHECK-NEXT:    ret
  %storemask = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %rev, ptr %ptr, <vscale x 2 x i1> %storemask, i32 %evl)
  ret void
}

; The reverse itself is masked by something other than an all-ones mask, so
; the combine does not fire and the reverse is lowered as a vrgather.
define void @test_store_mask_not_all_one(<vscale x 2 x float> %val, ptr %ptr, <vscale x 2 x i1> %notallones, i32 zeroext %evl) {
; CHECK-LABEL: test_store_mask_not_all_one:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vrsub.vx v9, v9, a1, v0.t
; CHECK-NEXT:    vrgather.vv v10, v8, v9, v0.t
; CHECK-NEXT:    vse32.v v10, (a0), v0.t
; CHECK-NEXT:    ret
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x i1> %notallones, i32 %evl)
  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %rev, ptr %ptr, <vscale x 2 x i1> %notallones, i32 %evl)
  ret void
}

; The reverses and the store use different EVLs, so the combine does not
; apply and both reverses are expanded.
define void @test_different_evl(<vscale x 2 x float> %val, ptr %ptr, <vscale x 2 x i1> %mask, i32 zeroext %evl1, i32 zeroext %evl2) {
; CHECK-LABEL: test_different_evl:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    addi a1, a1, -1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v11
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vrsub.vx v9, v9, a1
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vrsub.vx v11, v11, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v10, v9
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vv v9, v8, v11
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vse32.v v9, (a0), v0.t
; CHECK-NEXT:    ret
  %storemask = call <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1> %mask, <vscale x 2 x i1> splat (i1 true), i32 %evl1)
  %rev = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x i1> splat (i1 true), i32 %evl1)
  call void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float> %rev, ptr %ptr, <vscale x 2 x i1> %storemask, i32 %evl2)
  ret void
}

declare <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
declare <vscale x 2 x i1> @llvm.experimental.vp.reverse.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
declare void @llvm.vp.store.nxv2f32.p0(<vscale x 2 x float>, ptr nocapture, <vscale x 2 x i1>, i32)