; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvbb,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvbb,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-ZVBB

declare <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @vp_bitreverse_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}

define <vscale x 1 x i8> @vp_bitreverse_nxv1i8_unmasked(<vscale x 1 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.vp.bitreverse.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i8> %v
}
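; Without Zvbb, @llvm.vp.bitreverse expands to the classic SWAR sequence:
; swap the nibbles, then the 2-bit pairs, then adjacent bits. For e8 the
; nibble mask 15 (0xf) fits a vand.vi immediate, while the pair and bit
; masks are materialized as scalars: 51 = 0x33 and 85 = 0x55. Worked
; example: 0b10110010 -> 0b00101011 -> 0b10001110 -> 0b01001101.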
declare <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @vp_bitreverse_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %v
}

define <vscale x 2 x i8> @vp_bitreverse_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i8> @llvm.vp.bitreverse.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i8> %v
}

declare <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @vp_bitreverse_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @vp_bitreverse_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vp.bitreverse.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i8> %v
}
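; Note the masked/unmasked asymmetry in the final step: the masked
; expansion ends with vsll.vi v8, v8, 1, v0.t, while the unmasked one uses
; the equivalent vadd.vv v8, v8, v8 (x + x == x << 1).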
declare <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @vp_bitreverse_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %v
}

define <vscale x 8 x i8> @vp_bitreverse_nxv8i8_unmasked(<vscale x 8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vand.vi v9, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v9, v9, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i8> @llvm.vp.bitreverse.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i8> %v
}

declare <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i8> @vp_bitreverse_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v10, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v10, v10, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @vp_bitreverse_nxv16i8_unmasked(<vscale x 16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vand.vi v10, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v10, v10, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    vsrl.vi v10, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vp.bitreverse.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i8> %v
}

declare <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i8> @vp_bitreverse_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v12, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v12, v12, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv32i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i8> %v
}

define <vscale x 32 x i8> @vp_bitreverse_nxv32i8_unmasked(<vscale x 32 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv32i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vand.vi v12, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v12, v12, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v12
; CHECK-NEXT:    vsrl.vi v12, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv32i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i8> @llvm.vp.bitreverse.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i8> %v
}
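; The i8 expansion is identical at every LMUL (mf8 through m8); only the
; vsetvli and the scratch register change with the register-group size:
; v9 (LMUL <= 1), v10 (m2), v12 (m4), v16 (m8).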
declare <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8>, <vscale x 64 x i1>, i32)

define <vscale x 64 x i8> @vp_bitreverse_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v16, v8, 15, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v16, v16, 4, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    vor.vv v16, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v8, v16, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v16, v16, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i8:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 %evl)
  ret <vscale x 64 x i8> %v
}

define <vscale x 64 x i8> @vp_bitreverse_nxv64i8_unmasked(<vscale x 64 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv64i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vand.vi v16, v8, 15
; CHECK-NEXT:    vsrl.vi v8, v8, 4
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vsll.vi v16, v16, 4
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    vor.vv v8, v8, v16
; CHECK-NEXT:    vsrl.vi v16, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i8_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 64 x i8> @llvm.vp.bitreverse.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 64 x i8> %v
}
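; For e16 the masks no longer fit an immediate, so each is built with a
; lui/addi pair: lui 1 + addi -241 = 0x0f0f, lui 3 + addi 819 = 0x3333,
; lui 5 + addi 1365 = 0x5555. An 8-bit shift pair swaps the two bytes
; before the nibble/pair/bit swaps.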
declare <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @vp_bitreverse_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %v
}

define <vscale x 1 x i16> @vp_bitreverse_nxv1i16_unmasked(<vscale x 1 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i16> @llvm.vp.bitreverse.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i16> %v
}

declare <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @vp_bitreverse_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %v
}

define <vscale x 2 x i16> @vp_bitreverse_nxv2i16_unmasked(<vscale x 2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i16> @llvm.vp.bitreverse.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i16> %v
}

declare <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @vp_bitreverse_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %v
}

define <vscale x 4 x i16> @vp_bitreverse_nxv4i16_unmasked(<vscale x 4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i16> @llvm.vp.bitreverse.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i16> %v
}

declare <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @vp_bitreverse_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %v
}

define <vscale x 8 x i16> @vp_bitreverse_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i16> @llvm.vp.bitreverse.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i16> %v
}

declare <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i16> @vp_bitreverse_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i16> %v
}

define <vscale x 16 x i16> @vp_bitreverse_nxv16i16_unmasked(<vscale x 16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v12
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v12, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i16> @llvm.vp.bitreverse.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i16> %v
}

declare <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16>, <vscale x 32 x i1>, i32)

define <vscale x 32 x i16> @vp_bitreverse_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vor.vv v16, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v8, v16, 4, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v16, v16, 4, v0.t
; CHECK-NEXT:    vor.vv v16, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v8, v16, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v16, v16, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv32i16:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 %evl)
  ret <vscale x 32 x i16> %v
}

define <vscale x 32 x i16> @vp_bitreverse_nxv32i16_unmasked(<vscale x 32 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv32i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 8
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    vor.vv v8, v8, v16
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsrl.vi v16, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv32i16_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 32 x i16> @llvm.vp.bitreverse.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 32 x i16> %v
}
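; For e32 a byte swap comes first: bytes 0/3 move by shifts of 24, bytes
; 1/2 by shifts of 8 under the 0xff00 mask (lui 16 + addi -256). The bit
; masks are 0x0f0f0f0f (lui 61681 + addi -241), 0x33333333 (lui 209715 +
; addi 819) and 0x55555555 (lui 349525 + addi 1365).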
declare <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @vp_bitreverse_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v10, v10, 8, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %v
}

define <vscale x 1 x i32> @vp_bitreverse_nxv1i32_unmasked(<vscale x 1 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv1i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    vsrl.vi v10, v8, 24
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vor.vv v9, v9, v10
; CHECK-NEXT:    vsll.vi v10, v8, 24
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i32_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i32> @llvm.vp.bitreverse.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i32> %v
}

declare <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @vp_bitreverse_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v9, v9, v10, v0.t
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v10, v10, 8, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v9, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %v
}
define <vscale x 2 x i32> @vp_bitreverse_nxv2i32_unmasked(<vscale x 2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 8
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    vsrl.vi v10, v8, 24
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vor.vv v9, v9, v10
; CHECK-NEXT:    vsll.vi v10, v8, 24
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vor.vv v8, v8, v9
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v9, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i32_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 2 x i32> @llvm.vp.bitreverse.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 2 x i32> %v
}

declare <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @vp_bitreverse_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v10, v10, v12, v0.t
; CHECK-NEXT:    vand.vx v12, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v12, v12, 8, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v10, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v10, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %v
}

define <vscale x 4 x i32> @vp_bitreverse_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 8
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    vsrl.vi v12, v8, 24
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vor.vv v10, v10, v12
; CHECK-NEXT:    vsll.vi v12, v8, 24
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vor.vv v8, v8, v10
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v10, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i32_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 4 x i32> @llvm.vp.bitreverse.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)

define <vscale x 8 x i32> @vp_bitreverse_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v12, v12, v16, v0.t
; CHECK-NEXT:    vand.vx v16, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v16, v16, 8, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v12, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 4, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v12, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @vp_bitreverse_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 8
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    vsrl.vi v16, v8, 24
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vor.vv v12, v12, v16
; CHECK-NEXT:    vsll.vi v16, v8, 24
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vor.vv v8, v8, v12
; CHECK-NEXT:    vsrl.vi v12, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v12, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i32_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vp.bitreverse.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 8 x i32> %v
}

declare <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)

define <vscale x 16 x i32> @vp_bitreverse_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsrl.vi v24, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v16, v16, v24, v0.t
; CHECK-NEXT:    vand.vx v24, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v24, v24, 8, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 24, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v24, v0.t
; CHECK-NEXT:    vor.vv v16, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v8, v16, 4, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v16, v16, 4, v0.t
; CHECK-NEXT:    vor.vv v16, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v8, v16, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v16, v16, 2, v0.t
; CHECK-NEXT:    vor.vv v8, v8, v16, v0.t
; CHECK-NEXT:    vsrl.vi v16, v8, 1, v0.t
; CHECK-NEXT:    vand.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT:    vor.vv v8, v16, v8, v0.t
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i32:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 %evl)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @vp_bitreverse_nxv16i32_unmasked(<vscale x 16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_bitreverse_nxv16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vsrl.vi v16, v8, 8
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    vsrl.vi v24, v8, 24
; CHECK-NEXT:    addi a0, a0, -256
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vor.vv v16, v16, v24
; CHECK-NEXT:    vsll.vi v24, v8, 24
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vsll.vi v8, v8, 8
; CHECK-NEXT:    vor.vv v8, v24, v8
; CHECK-NEXT:    vor.vv v8, v8, v16
; CHECK-NEXT:    vsrl.vi v16, v8, 4
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vsll.vi v8, v8, 4
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vsll.vi v8, v8, 2
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    vsrl.vi v16, v8, 1
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vand.vx v16, v16, a0
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vor.vv v8, v16, v8
; CHECK-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv16i32_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vp.bitreverse.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 16 x i32> %v
}
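; For e64 the masks cannot be materialized in a 32-bit scalar on RV32, so
; the byte-swap constant is built on the stack and splatted with vlse64.v,
; and the nibble/pair/bit masks are splatted as 32-bit halves with vmv.v.x
; under a temporary e32 vsetvli. RV64 instead builds each 64-bit constant
; in a scalar register with lui/addiw/slli/add and uses vand.vx.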
declare <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_bitreverse_nxv1i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    lui a4, 1044480
; RV32-NEXT:    li a3, 56
; RV32-NEXT:    lui a5, 16
; RV32-NEXT:    li a2, 40
; RV32-NEXT:    lui a1, 4080
; RV32-NEXT:    addi a6, sp, 8
; RV32-NEXT:    sw a4, 8(sp)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a6), zero
; RV32-NEXT:    lui a4, 61681
; RV32-NEXT:    vsll.vx v10, v8, a3, v0.t
; RV32-NEXT:    addi a5, a5, -256
; RV32-NEXT:    vand.vx v11, v8, a5, v0.t
; RV32-NEXT:    vsll.vx v11, v11, a2, v0.t
; RV32-NEXT:    vor.vv v10, v10, v11, v0.t
; RV32-NEXT:    vand.vx v11, v8, a1, v0.t
; RV32-NEXT:    vsll.vi v11, v11, 24, v0.t
; RV32-NEXT:    vand.vv v12, v8, v9, v0.t
; RV32-NEXT:    vsll.vi v12, v12, 8, v0.t
; RV32-NEXT:    vor.vv v11, v11, v12, v0.t
; RV32-NEXT:    vor.vv v10, v10, v11, v0.t
; RV32-NEXT:    vsrl.vx v11, v8, a3, v0.t
; RV32-NEXT:    lui a3, 209715
; RV32-NEXT:    vsrl.vx v12, v8, a2, v0.t
; RV32-NEXT:    lui a2, 349525
; RV32-NEXT:    addi a4, a4, -241
; RV32-NEXT:    addi a3, a3, 819
; RV32-NEXT:    addi a2, a2, 1365
; RV32-NEXT:    vand.vx v12, v12, a5, v0.t
; RV32-NEXT:    vor.vv v11, v12, v11, v0.t
; RV32-NEXT:    vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT:    vand.vx v12, v12, a1, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a4
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v12, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v12, a3
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v11, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v11, a2
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT:    vand.vv v10, v10, v9, v0.t
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
; RV32-NEXT:    vsrl.vi v9, v8, 2, v0.t
; RV32-NEXT:    vand.vv v9, v9, v12, v0.t
; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
; RV32-NEXT:    vand.vv v9, v9, v11, v0.t
; RV32-NEXT:    vand.vv v8, v8, v11, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
; RV32-NEXT:    vor.vv v8, v9, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_bitreverse_nxv1i64:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 4080
; RV64-NEXT:    li a3, 255
; RV64-NEXT:    li a2, 56
; RV64-NEXT:    lui a4, 16
; RV64-NEXT:    lui a5, 61681
; RV64-NEXT:    lui a6, 209715
; RV64-NEXT:    lui a7, 349525
; RV64-NEXT:    addiw a5, a5, -241
; RV64-NEXT:    addiw a6, a6, 819
; RV64-NEXT:    addiw a7, a7, 1365
; RV64-NEXT:    slli t0, a5, 32
; RV64-NEXT:    add t0, a5, t0
; RV64-NEXT:    slli a5, a6, 32
; RV64-NEXT:    add a6, a6, a5
; RV64-NEXT:    slli a5, a7, 32
; RV64-NEXT:    add a5, a7, a5
; RV64-NEXT:    li a7, 40
; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT:    vand.vx v9, v8, a1, v0.t
; RV64-NEXT:    slli a3, a3, 24
; RV64-NEXT:    addiw a0, a4, -256
; RV64-NEXT:    vsll.vi v9, v9, 24, v0.t
; RV64-NEXT:    vand.vx v10, v8, a3, v0.t
; RV64-NEXT:    vsll.vi v10, v10, 8, v0.t
; RV64-NEXT:    vor.vv v9, v9, v10, v0.t
; RV64-NEXT:    vsll.vx v10, v8, a2, v0.t
; RV64-NEXT:    vand.vx v11, v8, a0, v0.t
; RV64-NEXT:    vsll.vx v11, v11, a7, v0.t
; RV64-NEXT:    vor.vv v10, v10, v11, v0.t
; RV64-NEXT:    vor.vv v9, v10, v9, v0.t
; RV64-NEXT:    vsrl.vx v10, v8, a2, v0.t
; RV64-NEXT:    vsrl.vx v11, v8, a7, v0.t
; RV64-NEXT:    vand.vx v11, v11, a0, v0.t
; RV64-NEXT:    vor.vv v10, v11, v10, v0.t
; RV64-NEXT:    vsrl.vi v11, v8, 24, v0.t
; RV64-NEXT:    vand.vx v11, v11, a1, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
; RV64-NEXT:    vor.vv v8, v8, v11, v0.t
; RV64-NEXT:    vor.vv v8, v8, v10, v0.t
; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
; RV64-NEXT:    vand.vx v9, v9, t0, v0.t
; RV64-NEXT:    vand.vx v8, v8, t0, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsrl.vi v9, v8, 2, v0.t
; RV64-NEXT:    vand.vx v9, v9, a6, v0.t
; RV64-NEXT:    vand.vx v8, v8, a6, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
; RV64-NEXT:    vand.vx v9, v9, a5, v0.t
; RV64-NEXT:    vand.vx v8, v8, a5, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
; RV64-NEXT:    vor.vv v8, v9, v8, v0.t
; RV64-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i64:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %v
}

define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_bitreverse_nxv1i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    lui a1, 1044480
; RV32-NEXT:    li a2, 56
; RV32-NEXT:    lui a3, 16
; RV32-NEXT:    li a4, 40
; RV32-NEXT:    lui a5, 4080
; RV32-NEXT:    addi a6, sp, 8
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vi v9, v8, 24
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    vsll.vx v10, v8, a2
; RV32-NEXT:    addi a1, a3, -256
; RV32-NEXT:    vsrl.vx v11, v8, a2
; RV32-NEXT:    vsrl.vx v12, v8, a4
; RV32-NEXT:    vand.vx v13, v8, a1
; RV32-NEXT:    vand.vx v12, v12, a1
; RV32-NEXT:    vor.vv v11, v12, v11
; RV32-NEXT:    vlse64.v v12, (a6), zero
; RV32-NEXT:    vsll.vx v13, v13, a4
; RV32-NEXT:    vor.vv v10, v10, v13
; RV32-NEXT:    vsrl.vi v13, v8, 8
; RV32-NEXT:    vand.vx v9, v9, a5
; RV32-NEXT:    vand.vv v13, v13, v12
; RV32-NEXT:    vor.vv v9, v13, v9
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    lui a2, 209715
; RV32-NEXT:    lui a3, 349525
; RV32-NEXT:    vand.vv v12, v8, v12
; RV32-NEXT:    vand.vx v8, v8, a5
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    addi a2, a2, 819
; RV32-NEXT:    addi a3, a3, 1365
; RV32-NEXT:    vsll.vi v8, v8, 24
; RV32-NEXT:    vsll.vi v12, v12, 8
; RV32-NEXT:    vor.vv v8, v8, v12
; RV32-NEXT:    vsetvli a4, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v9, v9, v11
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v11, a2
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v10, v8
; RV32-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v10, a3
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v9
; RV32-NEXT:    vsrl.vi v9, v8, 4
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    vand.vv v9, v9, v12
; RV32-NEXT:    vsll.vi v8, v8, 4
; RV32-NEXT:    vor.vv v8, v9, v8
; RV32-NEXT:    vsrl.vi v9, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v11
; RV32-NEXT:    vand.vv v9, v9, v11
; RV32-NEXT:    vsll.vi v8, v8, 2
; RV32-NEXT:    vor.vv v8, v9, v8
; RV32-NEXT:    vsrl.vi v9, v8, 1
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    vand.vv v9, v9, v10
; RV32-NEXT:    vadd.vv v8, v8, v8
; RV32-NEXT:    vor.vv v8, v9, v8
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_bitreverse_nxv1i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 4080
; RV64-NEXT:    li a2, 255
; RV64-NEXT:    li a3, 56
; RV64-NEXT:    lui a4, 16
; RV64-NEXT:    li a5, 40
; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT:    vsrl.vi v9, v8, 24
; RV64-NEXT:    vsrl.vi v10, v8, 8
; RV64-NEXT:    addiw a0, a4, -256
; RV64-NEXT:    vsrl.vx v11, v8, a3
; RV64-NEXT:    vsrl.vx v12, v8, a5
; RV64-NEXT:    vand.vx v12, v12, a0
; RV64-NEXT:    vor.vv v11, v12, v11
; RV64-NEXT:    vand.vx v12, v8, a1
; RV64-NEXT:    slli a2, a2, 24
; RV64-NEXT:    vand.vx v9, v9, a1
; RV64-NEXT:    vsll.vi v12, v12, 24
; RV64-NEXT:    vand.vx v10, v10, a2
; RV64-NEXT:    vor.vv v9, v10, v9
; RV64-NEXT:    vand.vx v10, v8, a2
; RV64-NEXT:    vsll.vi v10, v10, 8
; RV64-NEXT:    vor.vv v10, v12, v10
; RV64-NEXT:    vsll.vx v12, v8, a3
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vsll.vx v8, v8, a5
; RV64-NEXT:    vor.vv v8, v12, v8
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    lui a1, 209715
; RV64-NEXT:    lui a2, 349525
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    addiw a1, a1, 819
; RV64-NEXT:    addiw a2, a2, 1365
; RV64-NEXT:    slli a3, a0, 32
; RV64-NEXT:    slli a4, a1, 32
; RV64-NEXT:    add a0, a0, a3
; RV64-NEXT:    slli a3, a2, 32
; RV64-NEXT:    add a1, a1, a4
; RV64-NEXT:    add a2, a2, a3
; RV64-NEXT:    vor.vv v9, v9, v11
; RV64-NEXT:    vor.vv v8, v8, v10
; RV64-NEXT:    vor.vv v8, v8, v9
; RV64-NEXT:    vsrl.vi v9, v8, 4
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vand.vx v9, v9, a0
; RV64-NEXT:    vsll.vi v8, v8, 4
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    vsrl.vi v9, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a1
; RV64-NEXT:    vand.vx v9, v9, a1
; RV64-NEXT:    vsll.vi v8, v8, 2
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    vsrl.vi v9, v8, 1
; RV64-NEXT:    vand.vx v8, v8, a2
; RV64-NEXT:    vand.vx v9, v9, a2
; RV64-NEXT:    vadd.vv v8, v8, v8
; RV64-NEXT:    vor.vv v8, v9, v8
; RV64-NEXT:    ret
;
; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i64_unmasked:
; CHECK-ZVBB:       # %bb.0:
; CHECK-ZVBB-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-ZVBB-NEXT:    vbrev.v v8, v8
; CHECK-ZVBB-NEXT:    ret
  %v = call <vscale x 1 x i64> @llvm.vp.bitreverse.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> splat (i1 true), i32 %evl)
  ret <vscale x 1 x i64> %v
}

declare <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_bitreverse_nxv2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    .cfi_def_cfa_offset 16
; RV32-NEXT:    lui a4, 1044480
; RV32-NEXT:    li a3, 56
; RV32-NEXT:    lui a5, 16
; RV32-NEXT:    li a2, 40
; RV32-NEXT:    lui a1, 4080
; RV32-NEXT:    addi a6, sp, 8
; RV32-NEXT:    sw a4, 8(sp)
; RV32-NEXT:    sw zero, 12(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a6), zero
; RV32-NEXT:    lui a4, 61681
; RV32-NEXT:    vsll.vx v12, v8, a3, v0.t
; RV32-NEXT:    addi a5, a5, -256
; RV32-NEXT:    vand.vx v14, v8, a5, v0.t
; RV32-NEXT:    vsll.vx v14, v14, a2, v0.t
; RV32-NEXT:    vor.vv v12, v12, v14, v0.t
; RV32-NEXT:    vand.vx v14, v8, a1, v0.t
; RV32-NEXT:    vsll.vi v14, v14, 24, v0.t
; RV32-NEXT:    vand.vv v16, v8, v10, v0.t
; RV32-NEXT:    vsll.vi v16, v16, 8, v0.t
; RV32-NEXT:    vor.vv v14, v14, v16, v0.t
; RV32-NEXT:    vor.vv v12, v12, v14, v0.t
; RV32-NEXT:    vsrl.vx v14, v8, a3, v0.t
; RV32-NEXT:    lui a3, 209715
; RV32-NEXT:    vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT:    lui a2, 349525
; RV32-NEXT:    addi a4, a4, -241
; RV32-NEXT:    addi a3, a3, 819
; RV32-NEXT:    addi a2, a2, 1365
; RV32-NEXT:    vand.vx v16, v16, a5, v0.t
; RV32-NEXT:    vor.vv v14, v16, v14, v0.t
; RV32-NEXT:    vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT:    vand.vx v16, v16, a1, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 8, v0.t
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a4
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v16, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v16, a3
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vor.vv v8, v8, v14, v0.t
; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v14, a2
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
; RV32-NEXT:    vand.vv v12, v12, v10, v0.t
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 4, v0.t
; RV32-NEXT:    vor.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsrl.vi v10, v8, 2, v0.t
; RV32-NEXT:    vand.vv v10, v10, v16, v0.t
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 2, v0.t
; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
; RV32-NEXT:    vand.vv v10, v10, v14, v0.t
; RV32-NEXT:    vand.vv v8, v8, v14, v0.t
; RV32-NEXT:    vsll.vi v8, v8, 1, v0.t
; RV32-NEXT:    vor.vv v8, v10, v8, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    .cfi_def_cfa_offset 0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_bitreverse_nxv2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a1, 4080
; RV64-NEXT:    li a3, 255
; RV64-NEXT:    li a2, 56
; RV64-NEXT:    lui a4, 16
; RV64-NEXT:    lui a5, 61681
; RV64-NEXT:    lui a6, 209715
; RV64-NEXT:    lui a7, 349525
; RV64-NEXT:    addiw a5, a5, -241
; RV64-NEXT:    addiw a6, a6, 819
; RV64-NEXT:    addiw a7, a7, 1365
; RV64-NEXT:    slli t0, a5, 32
; RV64-NEXT:    add t0, a5, t0
; RV64-NEXT:    slli a5, a6, 32
; RV64-NEXT:    add a6, a6, a5
; RV64-NEXT:    slli a5, a7, 32
; RV64-NEXT:    add a5, a7, a5
; RV64-NEXT:    li a7, 40
; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT:    vand.vx v10, v8, a1, v0.t
; RV64-NEXT:    slli a3, a3, 24
; RV64-NEXT:    addiw a0, a4, -256
; RV64-NEXT:    vsll.vi v10, v10, 24, v0.t
; RV64-NEXT:    vand.vx v12, v8, a3, v0.t
; RV64-NEXT:    vsll.vi v12, v12, 8, v0.t
; RV64-NEXT:    vor.vv v10, v10, v12, v0.t
; RV64-NEXT:    vsll.vx v12, v8, a2, v0.t
; RV64-NEXT:    vand.vx v14, v8, a0, v0.t
; RV64-NEXT:    vsll.vx v14, v14, a7, v0.t
; RV64-NEXT:    vor.vv v12, v12, v14, v0.t
; RV64-NEXT:    vor.vv v10, v12, v10, v0.t
; RV64-NEXT:    vsrl.vx v12, v8, a2, v0.t
; RV64-NEXT:    vsrl.vx v14, v8, a7, v0.t
; RV64-NEXT:    vand.vx v14, v14, a0, v0.t
; RV64-NEXT:    vor.vv v12, v14, v12, v0.t
; RV64-NEXT:    vsrl.vi v14, v8, 24, v0.t
; RV64-NEXT:    vand.vx v14, v14, a1, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 8, v0.t
; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
; RV64-NEXT:    vor.vv v8, v8, v14, v0.t
; RV64-NEXT:    vor.vv v8, v8, v12, v0.t
; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
; RV64-NEXT:    vand.vx v10, v10, t0, v0.t
; RV64-NEXT:    vand.vx v8, v8, t0, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 4, v0.t
; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
; RV64-NEXT:    vsrl.vi v10, v8, 2, v0.t
; RV64-NEXT:    vand.vx v10, v10, a6, v0.t
; RV64-NEXT:    vand.vx v8, v8, a6, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 2, v0.t
; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
; RV64-NEXT:    vand.vx v10, v10, a5, v0.t
; RV64-NEXT:    vand.vx v8, v8, a5, v0.t
; RV64-NEXT:    vsll.vi v8, v8, 1, v0.t
; RV64-NEXT:    vor.vv v8, v10, v8, v0.t
v8, v0.t ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i64: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 %evl) ret <vscale x 2 x i64> %v } define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv2i64_unmasked: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: lui a5, 4080 ; RV32-NEXT: addi a6, sp, 8 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vsrl.vi v14, v8, 24 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsll.vx v12, v8, a2 ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vsrl.vx v10, v8, a2 ; RV32-NEXT: vsrl.vx v16, v8, a4 ; RV32-NEXT: vand.vx v18, v8, a1 ; RV32-NEXT: vand.vx v16, v16, a1 ; RV32-NEXT: vor.vv v10, v16, v10 ; RV32-NEXT: vlse64.v v16, (a6), zero ; RV32-NEXT: vsll.vx v18, v18, a4 ; RV32-NEXT: vor.vv v12, v12, v18 ; RV32-NEXT: vsrl.vi v18, v8, 8 ; RV32-NEXT: vand.vx v14, v14, a5 ; RV32-NEXT: vand.vv v18, v18, v16 ; RV32-NEXT: vor.vv v14, v18, v14 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: vand.vv v16, v8, v16 ; RV32-NEXT: vand.vx v8, v8, a5 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v16, v16, 8 ; RV32-NEXT: vor.vv v8, v8, v16 ; RV32-NEXT: vsetvli a4, zero, e32, m2, ta, ma ; RV32-NEXT: vmv.v.x v16, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vor.vv v10, v14, v10 ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmv.v.x v14, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vsetvli a1, zero, e32, m2, ta, ma ; RV32-NEXT: vmv.v.x v12, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV32-NEXT: vor.vv v8, v8, v10 ; RV32-NEXT: vsrl.vi v10, v8, 4 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vand.vv v10, v10, v16 ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: vsrl.vi v10, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v14 ; RV32-NEXT: vand.vv v10, v10, v14 ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: vsrl.vi v10, v8, 1 ; RV32-NEXT: vand.vv v8, v8, v12 ; RV32-NEXT: vand.vv v10, v10, v12 ; RV32-NEXT: vadd.vv v8, v8, v8 ; RV32-NEXT: vor.vv v8, v10, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv2i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; RV64-NEXT: vsrl.vi v12, v8, 24 ; RV64-NEXT: vsrl.vi v14, v8, 8 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsrl.vx v10, v8, a3 ; RV64-NEXT: vsrl.vx v16, v8, a5 ; RV64-NEXT: vand.vx v16, v16, a0 ; RV64-NEXT: vor.vv v10, v16, v10 ; RV64-NEXT: vand.vx v16, v8, a1 ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: vand.vx v12, v12, a1 ; RV64-NEXT: vsll.vi v16, v16, 24 ; RV64-NEXT: vand.vx v14, v14, a2 ; RV64-NEXT: vor.vv v12, v14, v12 ; RV64-NEXT: vand.vx v14, v8, a2 ; RV64-NEXT: vsll.vi v14, v14, 8 ; RV64-NEXT: vor.vv v14, v16, v14 ; RV64-NEXT: vsll.vx v16, v8, a3 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsll.vx v8, v8, a5 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; 
RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: vor.vv v10, v12, v10 ; RV64-NEXT: vor.vv v8, v8, v14 ; RV64-NEXT: vor.vv v8, v8, v10 ; RV64-NEXT: vsrl.vi v10, v8, 4 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vand.vx v10, v10, a0 ; RV64-NEXT: vsll.vi v8, v8, 4 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: vsrl.vi v10, v8, 2 ; RV64-NEXT: vand.vx v8, v8, a1 ; RV64-NEXT: vand.vx v10, v10, a1 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: vsrl.vi v10, v8, 1 ; RV64-NEXT: vand.vx v8, v8, a2 ; RV64-NEXT: vand.vx v10, v10, a2 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v10, v8 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv2i64_unmasked: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m2, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8 ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 2 x i64> @llvm.vp.bitreverse.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> splat (i1 true), i32 %evl) ret <vscale x 2 x i64> %v } declare <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32) define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv4i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a4, 1044480 ; RV32-NEXT: li a3, 56 ; RV32-NEXT: lui a5, 16 ; RV32-NEXT: li a2, 40 ; RV32-NEXT: lui a1, 4080 ; RV32-NEXT: addi a6, sp, 8 ; RV32-NEXT: sw a4, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vlse64.v v12, (a6), zero ; RV32-NEXT: lui a4, 61681 ; RV32-NEXT: vsll.vx v16, v8, a3, v0.t ; RV32-NEXT: addi a5, a5, -256 ; RV32-NEXT: vand.vx v20, v8, a5, v0.t ; RV32-NEXT: vsll.vx v20, v20, a2, v0.t ; RV32-NEXT: vor.vv v16, v16, v20, v0.t ; RV32-NEXT: vand.vx v20, v8, a1, v0.t ; RV32-NEXT: vsll.vi v20, v20, 24, v0.t ; RV32-NEXT: vand.vv v24, v8, v12, v0.t ; RV32-NEXT: vsll.vi v24, v24, 8, v0.t ; RV32-NEXT: vor.vv v20, v20, v24, v0.t ; RV32-NEXT: vor.vv v16, v16, v20, v0.t ; RV32-NEXT: vsrl.vx v20, v8, a3, v0.t ; RV32-NEXT: lui a3, 209715 ; RV32-NEXT: vsrl.vx v24, v8, a2, v0.t ; RV32-NEXT: lui a2, 349525 ; RV32-NEXT: addi a4, a4, -241 ; RV32-NEXT: addi a3, a3, 819 ; RV32-NEXT: addi a2, a2, 1365 ; RV32-NEXT: vand.vx v24, v24, a5, v0.t ; RV32-NEXT: vor.vv v20, v24, v20, v0.t ; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t ; RV32-NEXT: vand.vx v24, v24, a1, v0.t ; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV32-NEXT: vand.vv v8, v8, v12, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v28, a4 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v8, v8, v24, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v12, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v20, v8, v20, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v8, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v16, v16, v20, v0.t ; RV32-NEXT: vsrl.vi v20, v16, 4, v0.t ; RV32-NEXT: vand.vv v20, v20, v28, v0.t ; RV32-NEXT: vand.vv v16, v16, v28, v0.t ; RV32-NEXT: vsll.vi v16, v16, 4, v0.t ; RV32-NEXT: vor.vv v16, v20, v16, v0.t ; RV32-NEXT: vsrl.vi v20, v16, 2, v0.t ; RV32-NEXT: vand.vv v20, v20, v12, v0.t ; RV32-NEXT: vand.vv v12, v16, v12, v0.t ; RV32-NEXT: vsll.vi v12, v12, 2, v0.t ; RV32-NEXT: vor.vv v12, v20, v12, v0.t ; RV32-NEXT: vsrl.vi v16, v12, 1, v0.t ; RV32-NEXT: vand.vv v16, v16, v8, v0.t 
; RV32-NEXT: vand.vv v8, v12, v8, v0.t ; RV32-NEXT: vsll.vi v8, v8, 1, v0.t ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv4i64: ; RV64: # %bb.0: ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a3, 255 ; RV64-NEXT: li a2, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: lui a5, 61681 ; RV64-NEXT: lui a6, 209715 ; RV64-NEXT: lui a7, 349525 ; RV64-NEXT: addiw a5, a5, -241 ; RV64-NEXT: addiw a6, a6, 819 ; RV64-NEXT: addiw a7, a7, 1365 ; RV64-NEXT: slli t0, a5, 32 ; RV64-NEXT: add t0, a5, t0 ; RV64-NEXT: slli a5, a6, 32 ; RV64-NEXT: add a6, a6, a5 ; RV64-NEXT: slli a5, a7, 32 ; RV64-NEXT: add a5, a7, a5 ; RV64-NEXT: li a7, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vand.vx v12, v8, a1, v0.t ; RV64-NEXT: slli a3, a3, 24 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsll.vi v12, v12, 24, v0.t ; RV64-NEXT: vand.vx v16, v8, a3, v0.t ; RV64-NEXT: vsll.vi v16, v16, 8, v0.t ; RV64-NEXT: vor.vv v12, v12, v16, v0.t ; RV64-NEXT: vsll.vx v16, v8, a2, v0.t ; RV64-NEXT: vand.vx v20, v8, a0, v0.t ; RV64-NEXT: vsll.vx v20, v20, a7, v0.t ; RV64-NEXT: vor.vv v16, v16, v20, v0.t ; RV64-NEXT: vor.vv v12, v16, v12, v0.t ; RV64-NEXT: vsrl.vx v16, v8, a2, v0.t ; RV64-NEXT: vsrl.vx v20, v8, a7, v0.t ; RV64-NEXT: vand.vx v20, v20, a0, v0.t ; RV64-NEXT: vor.vv v16, v20, v16, v0.t ; RV64-NEXT: vsrl.vi v20, v8, 24, v0.t ; RV64-NEXT: vand.vx v20, v20, a1, v0.t ; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV64-NEXT: vand.vx v8, v8, a3, v0.t ; RV64-NEXT: vor.vv v8, v8, v20, v0.t ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v12, v8, v0.t ; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t ; RV64-NEXT: vand.vx v12, v12, t0, v0.t ; RV64-NEXT: vand.vx v8, v8, t0, v0.t ; RV64-NEXT: vsll.vi v8, v8, 4, v0.t ; RV64-NEXT: vor.vv v8, v12, v8, v0.t ; RV64-NEXT: vsrl.vi v12, v8, 2, v0.t ; RV64-NEXT: vand.vx v12, v12, a6, v0.t ; RV64-NEXT: vand.vx v8, v8, a6, v0.t ; RV64-NEXT: vsll.vi v8, v8, 2, v0.t ; RV64-NEXT: vor.vv v8, v12, v8, v0.t ; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t ; RV64-NEXT: vand.vx v12, v12, a5, v0.t ; RV64-NEXT: vand.vx v8, v8, a5, v0.t ; RV64-NEXT: vsll.vi v8, v8, 1, v0.t ; RV64-NEXT: vor.vv v8, v12, v8, v0.t ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i64: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 %evl) ret <vscale x 4 x i64> %v } define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv4i64_unmasked: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: lui a5, 4080 ; RV32-NEXT: addi a6, sp, 8 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vsrl.vi v20, v8, 24 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsll.vx v16, v8, a2 ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vsrl.vx v12, v8, a2 ; RV32-NEXT: vsrl.vx v24, v8, a4 ; RV32-NEXT: vand.vx v28, v8, a1 ; RV32-NEXT: vand.vx v24, v24, a1 ; RV32-NEXT: vor.vv v12, v24, v12 ; RV32-NEXT: vlse64.v v24, (a6), zero ; RV32-NEXT: vsll.vx v28, v28, a4 ; RV32-NEXT: vor.vv v16, v16, v28 ; RV32-NEXT: vsrl.vi v28, v8, 8 ; RV32-NEXT: vand.vx v20, v20, a5 ; RV32-NEXT: vand.vv v28, v28, v24 ; RV32-NEXT: vor.vv v20, v28, v20 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: vand.vv v24, 
v8, v24 ; RV32-NEXT: vand.vx v8, v8, a5 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v8, v8, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v12, v20, v12 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v20, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsetvli a1, zero, e32, m4, ta, ma ; RV32-NEXT: vmv.v.x v16, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV32-NEXT: vor.vv v8, v8, v12 ; RV32-NEXT: vsrl.vi v12, v8, 4 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v12, v12, v24 ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vsrl.vi v12, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v20 ; RV32-NEXT: vand.vv v12, v12, v20 ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: vsrl.vi v12, v8, 1 ; RV32-NEXT: vand.vv v8, v8, v16 ; RV32-NEXT: vand.vv v12, v12, v16 ; RV32-NEXT: vadd.vv v8, v8, v8 ; RV32-NEXT: vor.vv v8, v12, v8 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv4i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; RV64-NEXT: vsrl.vi v16, v8, 24 ; RV64-NEXT: vsrl.vi v20, v8, 8 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsrl.vx v12, v8, a3 ; RV64-NEXT: vsrl.vx v24, v8, a5 ; RV64-NEXT: vand.vx v24, v24, a0 ; RV64-NEXT: vor.vv v12, v24, v12 ; RV64-NEXT: vand.vx v24, v8, a1 ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: vand.vx v16, v16, a1 ; RV64-NEXT: vsll.vi v24, v24, 24 ; RV64-NEXT: vand.vx v20, v20, a2 ; RV64-NEXT: vor.vv v16, v20, v16 ; RV64-NEXT: vand.vx v20, v8, a2 ; RV64-NEXT: vsll.vi v20, v20, 8 ; RV64-NEXT: vor.vv v20, v24, v20 ; RV64-NEXT: vsll.vx v24, v8, a3 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsll.vx v8, v8, a5 ; RV64-NEXT: vor.vv v8, v24, v8 ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: vor.vv v12, v16, v12 ; RV64-NEXT: vor.vv v8, v8, v20 ; RV64-NEXT: vor.vv v8, v8, v12 ; RV64-NEXT: vsrl.vi v12, v8, 4 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vand.vx v12, v12, a0 ; RV64-NEXT: vsll.vi v8, v8, 4 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: vsrl.vi v12, v8, 2 ; RV64-NEXT: vand.vx v8, v8, a1 ; RV64-NEXT: vand.vx v12, v12, a1 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: vsrl.vi v12, v8, 1 ; RV64-NEXT: vand.vx v8, v8, a2 ; RV64-NEXT: vand.vx v12, v12, a2 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v12, v8 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv4i64_unmasked: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m4, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8 ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 4 x i64> @llvm.vp.bitreverse.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> splat (i1 true), i32 %evl) ret <vscale x 4 x i64> %v } declare <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64>, <vscale x 7 x i1>, i32) define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv7i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, 
sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 24 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: addi a5, sp, 8 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a2, v0.t ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vand.vx v24, v8, a1, v0.t ; RV32-NEXT: vsll.vx v24, v24, a4, v0.t ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v16, (a5), zero ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 3 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill ; RV32-NEXT: lui a3, 4080 ; RV32-NEXT: vand.vx v24, v8, a3, v0.t ; RV32-NEXT: vsll.vi v24, v24, 24, v0.t ; RV32-NEXT: addi a5, sp, 16 ; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill ; RV32-NEXT: vand.vv v24, v8, v16, v0.t ; RV32-NEXT: vsll.vi v16, v24, 8, v0.t ; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a5, vlenb ; RV32-NEXT: slli a5, a5, 4 ; RV32-NEXT: add a5, sp, a5 ; RV32-NEXT: addi a5, a5, 16 ; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a5, vlenb ; RV32-NEXT: slli a5, a5, 4 ; RV32-NEXT: add a5, sp, a5 ; RV32-NEXT: addi a5, a5, 16 ; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill ; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t ; RV32-NEXT: vsrl.vx v24, v8, a4, v0.t ; RV32-NEXT: vand.vx v24, v24, a1, v0.t ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t ; RV32-NEXT: vand.vx v24, v24, a3, v0.t ; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: vor.vv v8, v8, v24, v0.t ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV32-NEXT: vand.vv v16, v16, v24, v0.t ; RV32-NEXT: vand.vv v24, v8, v24, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v8, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v24, v24, 4, v0.t ; RV32-NEXT: vor.vv v24, v16, v24, v0.t ; RV32-NEXT: vsrl.vi v16, v24, 2, v0.t ; RV32-NEXT: vand.vv v16, v16, v8, v0.t ; RV32-NEXT: vand.vv v24, v24, v8, v0.t ; RV32-NEXT: vsetvli a1, zero, 
e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v8, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v24, v24, 2, v0.t ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t ; RV32-NEXT: vand.vv v24, v24, v8, v0.t ; RV32-NEXT: vand.vv v8, v16, v8, v0.t ; RV32-NEXT: vsll.vi v8, v8, 1, v0.t ; RV32-NEXT: vor.vv v8, v24, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv7i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vand.vx v16, v8, a1, v0.t ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsll.vi v16, v16, 24, v0.t ; RV64-NEXT: vand.vx v24, v8, a2, v0.t ; RV64-NEXT: vsll.vi v24, v24, 8, v0.t ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsll.vx v24, v8, a3, v0.t ; RV64-NEXT: vand.vx v16, v8, a0, v0.t ; RV64-NEXT: vsll.vx v16, v16, a5, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsrl.vx v24, v8, a3, v0.t ; RV64-NEXT: vsrl.vx v16, v8, a5, v0.t ; RV64-NEXT: vand.vx v16, v16, a0, v0.t ; RV64-NEXT: vor.vv v24, v16, v24, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 24, v0.t ; RV64-NEXT: vand.vx v16, v16, a1, v0.t ; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV64-NEXT: vand.vx v8, v8, a2, v0.t ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV64-NEXT: vand.vx v16, v16, a0, v0.t ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: vsll.vi v8, v8, 4, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t ; RV64-NEXT: vand.vx v16, v16, a1, v0.t ; RV64-NEXT: vand.vx v8, v8, a1, v0.t ; RV64-NEXT: vsll.vi v8, v8, 2, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t ; RV64-NEXT: vand.vx v16, v16, a2, v0.t ; RV64-NEXT: vand.vx v8, v8, a2, v0.t ; RV64-NEXT: vsll.vi v8, v8, 1, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv7i64: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; 
CHECK-ZVBB-NEXT: ret %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> %m, i32 %evl) ret <vscale x 7 x i64> %v } define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv7i64_unmasked: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: lui a5, 4080 ; RV32-NEXT: addi a6, sp, 8 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a2 ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vsrl.vx v24, v8, a2 ; RV32-NEXT: vsrl.vx v0, v8, a4 ; RV32-NEXT: vand.vx v0, v0, a1 ; RV32-NEXT: vor.vv v24, v0, v24 ; RV32-NEXT: addi a2, sp, 16 ; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; RV32-NEXT: vand.vx v24, v8, a1 ; RV32-NEXT: vsll.vx v24, v24, a4 ; RV32-NEXT: vor.vv v16, v16, v24 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v24, (a6), zero ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v0, v8, 8 ; RV32-NEXT: vand.vv v0, v0, v24 ; RV32-NEXT: vor.vv v16, v0, v16 ; RV32-NEXT: vand.vv v24, v8, v24 ; RV32-NEXT: vand.vx v8, v8, a5 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v24, v8, v24 ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: csrr a4, vlenb ; RV32-NEXT: slli a4, a4, 3 ; RV32-NEXT: add a4, sp, a4 ; RV32-NEXT: addi a4, a4, 16 ; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 4 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vadd.vv v8, v8, v8 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv7i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 
0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsrl.vi v24, v8, 24 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsrl.vx v16, v8, a3 ; RV64-NEXT: vsrl.vx v0, v8, a5 ; RV64-NEXT: vand.vx v0, v0, a0 ; RV64-NEXT: vor.vv v16, v0, v16 ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsrl.vi v0, v8, 8 ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: vand.vx v24, v24, a1 ; RV64-NEXT: vand.vx v0, v0, a2 ; RV64-NEXT: vor.vv v24, v0, v24 ; RV64-NEXT: vand.vx v0, v8, a1 ; RV64-NEXT: vsll.vi v0, v0, 24 ; RV64-NEXT: vand.vx v16, v8, a2 ; RV64-NEXT: vsll.vi v16, v16, 8 ; RV64-NEXT: vor.vv v0, v0, v16 ; RV64-NEXT: vsll.vx v16, v8, a3 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsll.vx v8, v8, a5 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v24, v16 ; RV64-NEXT: vor.vv v8, v8, v0 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 4 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vand.vx v16, v16, a0 ; RV64-NEXT: vsll.vi v8, v8, 4 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: vsrl.vi v16, v8, 2 ; RV64-NEXT: vand.vx v8, v8, a1 ; RV64-NEXT: vand.vx v16, v16, a1 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vand.vx v8, v8, a2 ; RV64-NEXT: vand.vx v16, v16, a2 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv7i64_unmasked: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8 ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 7 x i64> @llvm.vp.bitreverse.nxv7i64(<vscale x 7 x i64> %va, <vscale x 7 x i1> splat (i1 true), i32 %evl) ret <vscale x 7 x i64> %v } declare <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32) define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv8i64: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: li a2, 24 ; RV32-NEXT: mul a1, a1, a2 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: addi a5, sp, 8 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a2, v0.t ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vand.vx v24, v8, a1, v0.t ; RV32-NEXT: vsll.vx v24, v24, a4, v0.t ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 4 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v16, (a5), zero ; RV32-NEXT: 
csrr a3, vlenb ; RV32-NEXT: slli a3, a3, 3 ; RV32-NEXT: add a3, sp, a3 ; RV32-NEXT: addi a3, a3, 16 ; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill ; RV32-NEXT: lui a3, 4080 ; RV32-NEXT: vand.vx v24, v8, a3, v0.t ; RV32-NEXT: vsll.vi v24, v24, 24, v0.t ; RV32-NEXT: addi a5, sp, 16 ; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill ; RV32-NEXT: vand.vv v24, v8, v16, v0.t ; RV32-NEXT: vsll.vi v16, v24, 8, v0.t ; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a5, vlenb ; RV32-NEXT: slli a5, a5, 4 ; RV32-NEXT: add a5, sp, a5 ; RV32-NEXT: addi a5, a5, 16 ; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: csrr a5, vlenb ; RV32-NEXT: slli a5, a5, 4 ; RV32-NEXT: add a5, sp, a5 ; RV32-NEXT: addi a5, a5, 16 ; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill ; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t ; RV32-NEXT: vsrl.vx v24, v8, a4, v0.t ; RV32-NEXT: vand.vx v24, v24, a1, v0.t ; RV32-NEXT: vor.vv v16, v24, v16, v0.t ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t ; RV32-NEXT: vand.vx v24, v24, a3, v0.t ; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vand.vv v8, v8, v16, v0.t ; RV32-NEXT: vor.vv v8, v8, v24, v0.t ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v8, v16, v0.t ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v16, v8, v0.t ; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV32-NEXT: vand.vv v16, v16, v24, v0.t ; RV32-NEXT: vand.vv v24, v8, v24, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v8, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v24, v24, 4, v0.t ; RV32-NEXT: vor.vv v24, v16, v24, v0.t ; RV32-NEXT: vsrl.vi v16, v24, 2, v0.t ; RV32-NEXT: vand.vv v16, v16, v8, v0.t ; RV32-NEXT: vand.vv v24, v24, v8, v0.t ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v8, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v24, v24, 2, v0.t ; RV32-NEXT: vor.vv v16, v16, v24, v0.t ; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t ; RV32-NEXT: vand.vv v24, v24, v8, v0.t ; RV32-NEXT: vand.vv v8, v16, v8, v0.t ; RV32-NEXT: vsll.vi v8, v8, 1, v0.t ; RV32-NEXT: vor.vv v8, v24, v8, v0.t ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: li a1, 24 ; RV32-NEXT: mul a0, a0, a1 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv8i64: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; 
RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vand.vx v16, v8, a1, v0.t ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsll.vi v16, v16, 24, v0.t ; RV64-NEXT: vand.vx v24, v8, a2, v0.t ; RV64-NEXT: vsll.vi v24, v24, 8, v0.t ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsll.vx v24, v8, a3, v0.t ; RV64-NEXT: vand.vx v16, v8, a0, v0.t ; RV64-NEXT: vsll.vx v16, v16, a5, v0.t ; RV64-NEXT: vor.vv v16, v24, v16, v0.t ; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v16, v24, v0.t ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsrl.vx v24, v8, a3, v0.t ; RV64-NEXT: vsrl.vx v16, v8, a5, v0.t ; RV64-NEXT: vand.vx v16, v16, a0, v0.t ; RV64-NEXT: vor.vv v24, v16, v24, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 24, v0.t ; RV64-NEXT: vand.vx v16, v16, a1, v0.t ; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t ; RV64-NEXT: vand.vx v8, v8, a2, v0.t ; RV64-NEXT: vor.vv v8, v8, v16, v0.t ; RV64-NEXT: vor.vv v8, v8, v24, v0.t ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t ; RV64-NEXT: vand.vx v16, v16, a0, v0.t ; RV64-NEXT: vand.vx v8, v8, a0, v0.t ; RV64-NEXT: vsll.vi v8, v8, 4, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 2, v0.t ; RV64-NEXT: vand.vx v16, v16, a1, v0.t ; RV64-NEXT: vand.vx v8, v8, a1, v0.t ; RV64-NEXT: vsll.vi v8, v8, 2, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t ; RV64-NEXT: vand.vx v16, v16, a2, v0.t ; RV64-NEXT: vand.vx v8, v8, a2, v0.t ; RV64-NEXT: vsll.vi v8, v8, 1, v0.t ; RV64-NEXT: vor.vv v8, v16, v8, v0.t ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i64: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 %evl) ret <vscale x 8 x i64> %v } define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32 zeroext %evl) { ; RV32-LABEL: vp_bitreverse_nxv8i64_unmasked: ; RV32: # %bb.0: ; RV32-NEXT: addi sp, sp, -16 ; RV32-NEXT: .cfi_def_cfa_offset 16 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 4 ; RV32-NEXT: sub sp, sp, a1 ; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb ; RV32-NEXT: lui a1, 1044480 ; RV32-NEXT: li a2, 56 ; RV32-NEXT: lui a3, 16 ; RV32-NEXT: li a4, 40 ; RV32-NEXT: lui a5, 4080 ; RV32-NEXT: addi a6, sp, 8 ; RV32-NEXT: sw a1, 8(sp) ; RV32-NEXT: sw zero, 12(sp) ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vx v16, v8, a2 ; RV32-NEXT: addi a1, a3, -256 ; RV32-NEXT: vsrl.vx v24, v8, a2 ; RV32-NEXT: vsrl.vx v0, v8, a4 ; RV32-NEXT: vand.vx v0, v0, a1 ; RV32-NEXT: 
vor.vv v24, v0, v24 ; RV32-NEXT: addi a2, sp, 16 ; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill ; RV32-NEXT: vand.vx v24, v8, a1 ; RV32-NEXT: vsll.vx v24, v24, a4 ; RV32-NEXT: vor.vv v16, v16, v24 ; RV32-NEXT: csrr a1, vlenb ; RV32-NEXT: slli a1, a1, 3 ; RV32-NEXT: add a1, sp, a1 ; RV32-NEXT: addi a1, a1, 16 ; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill ; RV32-NEXT: vlse64.v v24, (a6), zero ; RV32-NEXT: vsrl.vi v16, v8, 24 ; RV32-NEXT: vand.vx v16, v16, a5 ; RV32-NEXT: vsrl.vi v0, v8, 8 ; RV32-NEXT: vand.vv v0, v0, v24 ; RV32-NEXT: vor.vv v16, v0, v16 ; RV32-NEXT: vand.vv v24, v8, v24 ; RV32-NEXT: vand.vx v8, v8, a5 ; RV32-NEXT: vsll.vi v8, v8, 24 ; RV32-NEXT: vsll.vi v24, v24, 8 ; RV32-NEXT: vor.vv v24, v8, v24 ; RV32-NEXT: addi a1, sp, 16 ; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: lui a1, 61681 ; RV32-NEXT: lui a2, 209715 ; RV32-NEXT: lui a3, 349525 ; RV32-NEXT: addi a1, a1, -241 ; RV32-NEXT: addi a2, a2, 819 ; RV32-NEXT: addi a3, a3, 1365 ; RV32-NEXT: csrr a4, vlenb ; RV32-NEXT: slli a4, a4, 3 ; RV32-NEXT: add a4, sp, a4 ; RV32-NEXT: addi a4, a4, 16 ; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload ; RV32-NEXT: vor.vv v16, v16, v24 ; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a1 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 4 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a2 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 4 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 2 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma ; RV32-NEXT: vmv.v.x v24, a3 ; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV32-NEXT: vsll.vi v8, v8, 2 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: vsrl.vi v16, v8, 1 ; RV32-NEXT: vand.vv v8, v8, v24 ; RV32-NEXT: vand.vv v16, v16, v24 ; RV32-NEXT: vadd.vv v8, v8, v8 ; RV32-NEXT: vor.vv v8, v16, v8 ; RV32-NEXT: csrr a0, vlenb ; RV32-NEXT: slli a0, a0, 4 ; RV32-NEXT: add sp, sp, a0 ; RV32-NEXT: .cfi_def_cfa sp, 16 ; RV32-NEXT: addi sp, sp, 16 ; RV32-NEXT: .cfi_def_cfa_offset 0 ; RV32-NEXT: ret ; ; RV64-LABEL: vp_bitreverse_nxv8i64_unmasked: ; RV64: # %bb.0: ; RV64-NEXT: addi sp, sp, -16 ; RV64-NEXT: .cfi_def_cfa_offset 16 ; RV64-NEXT: csrr a1, vlenb ; RV64-NEXT: slli a1, a1, 3 ; RV64-NEXT: sub sp, sp, a1 ; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb ; RV64-NEXT: lui a1, 4080 ; RV64-NEXT: li a2, 255 ; RV64-NEXT: li a3, 56 ; RV64-NEXT: lui a4, 16 ; RV64-NEXT: li a5, 40 ; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; RV64-NEXT: vsrl.vi v24, v8, 24 ; RV64-NEXT: addiw a0, a4, -256 ; RV64-NEXT: vsrl.vx v16, v8, a3 ; RV64-NEXT: vsrl.vx v0, v8, a5 ; RV64-NEXT: vand.vx v0, v0, a0 ; RV64-NEXT: vor.vv v16, v0, v16 ; RV64-NEXT: addi a4, sp, 16 ; RV64-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill ; RV64-NEXT: vsrl.vi v0, v8, 8 ; RV64-NEXT: slli a2, a2, 24 ; RV64-NEXT: vand.vx v24, v24, a1 ; RV64-NEXT: vand.vx v0, v0, a2 ; RV64-NEXT: vor.vv v24, v0, v24 ; RV64-NEXT: vand.vx v0, v8, a1 ; RV64-NEXT: vsll.vi v0, v0, 24 ; RV64-NEXT: vand.vx v16, v8, a2 ; RV64-NEXT: vsll.vi v16, v16, 8 ; RV64-NEXT: vor.vv v0, v0, v16 ; RV64-NEXT: vsll.vx v16, v8, a3 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vsll.vx v8, v8, a5 ; 
RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: lui a0, 61681 ; RV64-NEXT: lui a1, 209715 ; RV64-NEXT: lui a2, 349525 ; RV64-NEXT: addiw a0, a0, -241 ; RV64-NEXT: addiw a1, a1, 819 ; RV64-NEXT: addiw a2, a2, 1365 ; RV64-NEXT: slli a3, a0, 32 ; RV64-NEXT: slli a4, a1, 32 ; RV64-NEXT: add a0, a0, a3 ; RV64-NEXT: slli a3, a2, 32 ; RV64-NEXT: add a1, a1, a4 ; RV64-NEXT: add a2, a2, a3 ; RV64-NEXT: addi a3, sp, 16 ; RV64-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload ; RV64-NEXT: vor.vv v16, v24, v16 ; RV64-NEXT: vor.vv v8, v8, v0 ; RV64-NEXT: vor.vv v8, v8, v16 ; RV64-NEXT: vsrl.vi v16, v8, 4 ; RV64-NEXT: vand.vx v8, v8, a0 ; RV64-NEXT: vand.vx v16, v16, a0 ; RV64-NEXT: vsll.vi v8, v8, 4 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: vsrl.vi v16, v8, 2 ; RV64-NEXT: vand.vx v8, v8, a1 ; RV64-NEXT: vand.vx v16, v16, a1 ; RV64-NEXT: vsll.vi v8, v8, 2 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: vsrl.vi v16, v8, 1 ; RV64-NEXT: vand.vx v8, v8, a2 ; RV64-NEXT: vand.vx v16, v16, a2 ; RV64-NEXT: vadd.vv v8, v8, v8 ; RV64-NEXT: vor.vv v8, v16, v8 ; RV64-NEXT: csrr a0, vlenb ; RV64-NEXT: slli a0, a0, 3 ; RV64-NEXT: add sp, sp, a0 ; RV64-NEXT: .cfi_def_cfa sp, 16 ; RV64-NEXT: addi sp, sp, 16 ; RV64-NEXT: .cfi_def_cfa_offset 0 ; RV64-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv8i64_unmasked: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8 ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 8 x i64> @llvm.vp.bitreverse.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> splat (i1 true), i32 %evl) ret <vscale x 8 x i64> %v } ; Test splitting. Use the i16 version for easier checking. declare <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16>, <vscale x 64 x i1>, i32) define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 4 ; CHECK-NEXT: sub sp, sp, a1 ; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb ; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-NEXT: vmv1r.v v24, v0 ; CHECK-NEXT: csrr a1, vlenb ; CHECK-NEXT: slli a1, a1, 3 ; CHECK-NEXT: add a1, sp, a1 ; CHECK-NEXT: addi a1, a1, 16 ; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: lui a1, 1 ; CHECK-NEXT: lui a2, 3 ; CHECK-NEXT: srli a4, a3, 1 ; CHECK-NEXT: slli a3, a3, 2 ; CHECK-NEXT: vslidedown.vx v0, v0, a4 ; CHECK-NEXT: sub a4, a0, a3 ; CHECK-NEXT: sltu a5, a0, a4 ; CHECK-NEXT: addi a5, a5, -1 ; CHECK-NEXT: and a5, a5, a4 ; CHECK-NEXT: lui a6, 5 ; CHECK-NEXT: addi a4, a1, -241 ; CHECK-NEXT: addi a2, a2, 819 ; CHECK-NEXT: addi a1, a6, 1365 ; CHECK-NEXT: vsetvli zero, a5, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v8, v16, 8, v0.t ; CHECK-NEXT: vsll.vi v16, v16, 8, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t ; CHECK-NEXT: vand.vx v16, v16, a4, v0.t ; CHECK-NEXT: vand.vx v8, v8, a4, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t ; CHECK-NEXT: vand.vx v16, v16, a2, v0.t ; CHECK-NEXT: vand.vx v8, v8, a2, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 2, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t ; CHECK-NEXT: vand.vx v16, v16, a1, v0.t ; CHECK-NEXT: vand.vx v8, v8, a1, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a5, sp, 16 ; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill ; CHECK-NEXT: bltu a0, a3, 
.LBB46_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a3 ; CHECK-NEXT: .LBB46_2: ; CHECK-NEXT: vmv1r.v v0, v24 ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: slli a3, a3, 3 ; CHECK-NEXT: add a3, sp, a3 ; CHECK-NEXT: addi a3, a3, 16 ; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t ; CHECK-NEXT: vor.vv v8, v8, v16, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t ; CHECK-NEXT: vand.vx v16, v16, a4, v0.t ; CHECK-NEXT: vand.vx v8, v8, a4, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 2, v0.t ; CHECK-NEXT: vand.vx v16, v16, a2, v0.t ; CHECK-NEXT: vand.vx v8, v8, a2, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 2, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t ; CHECK-NEXT: vand.vx v16, v16, a1, v0.t ; CHECK-NEXT: vand.vx v8, v8, a1, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t ; CHECK-NEXT: vor.vv v8, v16, v8, v0.t ; CHECK-NEXT: addi a0, sp, 16 ; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload ; CHECK-NEXT: csrr a0, vlenb ; CHECK-NEXT: slli a0, a0, 4 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: .cfi_def_cfa sp, 16 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: .cfi_def_cfa_offset 0 ; CHECK-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli a1, zero, e8, m1, ta, ma ; CHECK-ZVBB-NEXT: vmv1r.v v24, v0 ; CHECK-ZVBB-NEXT: csrr a1, vlenb ; CHECK-ZVBB-NEXT: srli a2, a1, 1 ; CHECK-ZVBB-NEXT: slli a1, a1, 2 ; CHECK-ZVBB-NEXT: vslidedown.vx v0, v0, a2 ; CHECK-ZVBB-NEXT: sub a2, a0, a1 ; CHECK-ZVBB-NEXT: sltu a3, a0, a2 ; CHECK-ZVBB-NEXT: addi a3, a3, -1 ; CHECK-ZVBB-NEXT: and a2, a3, a2 ; CHECK-ZVBB-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v16, v16, v0.t ; CHECK-ZVBB-NEXT: bltu a0, a1, .LBB46_2 ; CHECK-ZVBB-NEXT: # %bb.1: ; CHECK-ZVBB-NEXT: mv a0, a1 ; CHECK-ZVBB-NEXT: .LBB46_2: ; CHECK-ZVBB-NEXT: vmv1r.v v0, v24 ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl) ret <vscale x 64 x i16> %v } define <vscale x 64 x i16> @vp_bitreverse_nxv64i16_unmasked(<vscale x 64 x i16> %va, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv64i16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: csrr a3, vlenb ; CHECK-NEXT: lui a1, 1 ; CHECK-NEXT: lui a2, 3 ; CHECK-NEXT: slli a3, a3, 2 ; CHECK-NEXT: sub a4, a0, a3 ; CHECK-NEXT: sltu a5, a0, a4 ; CHECK-NEXT: addi a5, a5, -1 ; CHECK-NEXT: and a5, a5, a4 ; CHECK-NEXT: lui a6, 5 ; CHECK-NEXT: addi a4, a1, -241 ; CHECK-NEXT: addi a2, a2, 819 ; CHECK-NEXT: addi a1, a6, 1365 ; CHECK-NEXT: vsetvli zero, a5, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v24, v16, 8 ; CHECK-NEXT: vsll.vi v16, v16, 8 ; CHECK-NEXT: vor.vv v16, v16, v24 ; CHECK-NEXT: vsrl.vi v24, v16, 4 ; CHECK-NEXT: vand.vx v16, v16, a4 ; CHECK-NEXT: vand.vx v24, v24, a4 ; CHECK-NEXT: vsll.vi v16, v16, 4 ; CHECK-NEXT: vor.vv v16, v24, v16 ; CHECK-NEXT: vsrl.vi v24, v16, 2 ; CHECK-NEXT: vand.vx v16, v16, a2 ; CHECK-NEXT: vand.vx v24, v24, a2 ; CHECK-NEXT: vsll.vi v16, v16, 2 ; CHECK-NEXT: vor.vv v16, v24, v16 ; CHECK-NEXT: vsrl.vi v24, v16, 1 ; CHECK-NEXT: vand.vx v16, v16, a1 ; CHECK-NEXT: vand.vx v24, v24, a1 ; CHECK-NEXT: vadd.vv v16, v16, v16 ; CHECK-NEXT: vor.vv v16, v24, v16 ; CHECK-NEXT: bltu a0, a3, .LBB47_2 ; CHECK-NEXT: # %bb.1: ; CHECK-NEXT: mv a0, a3 ; CHECK-NEXT: .LBB47_2: ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-NEXT: vsrl.vi v24, v8, 8 ; 
CHECK-NEXT: vsll.vi v8, v8, 8 ; CHECK-NEXT: vor.vv v8, v8, v24 ; CHECK-NEXT: vsrl.vi v24, v8, 4 ; CHECK-NEXT: vand.vx v8, v8, a4 ; CHECK-NEXT: vand.vx v24, v24, a4 ; CHECK-NEXT: vsll.vi v8, v8, 4 ; CHECK-NEXT: vor.vv v8, v24, v8 ; CHECK-NEXT: vsrl.vi v24, v8, 2 ; CHECK-NEXT: vand.vx v8, v8, a2 ; CHECK-NEXT: vand.vx v24, v24, a2 ; CHECK-NEXT: vsll.vi v8, v8, 2 ; CHECK-NEXT: vor.vv v8, v24, v8 ; CHECK-NEXT: vsrl.vi v24, v8, 1 ; CHECK-NEXT: vand.vx v8, v8, a1 ; CHECK-NEXT: vand.vx v24, v24, a1 ; CHECK-NEXT: vadd.vv v8, v8, v8 ; CHECK-NEXT: vor.vv v8, v24, v8 ; CHECK-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv64i16_unmasked: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: csrr a1, vlenb ; CHECK-ZVBB-NEXT: slli a1, a1, 2 ; CHECK-ZVBB-NEXT: sub a2, a0, a1 ; CHECK-ZVBB-NEXT: sltu a3, a0, a2 ; CHECK-ZVBB-NEXT: addi a3, a3, -1 ; CHECK-ZVBB-NEXT: and a2, a3, a2 ; CHECK-ZVBB-NEXT: vsetvli zero, a2, e16, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v16, v16 ; CHECK-ZVBB-NEXT: bltu a0, a1, .LBB47_2 ; CHECK-ZVBB-NEXT: # %bb.1: ; CHECK-ZVBB-NEXT: mv a0, a1 ; CHECK-ZVBB-NEXT: .LBB47_2: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8 ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> splat (i1 true), i32 %evl) ret <vscale x 64 x i16> %v } ; Test promotion. declare <vscale x 1 x i9> @llvm.vp.bitreverse.nxv1i9(<vscale x 1 x i9>, <vscale x 1 x i1>, i32) define <vscale x 1 x i9> @vp_bitreverse_nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) { ; CHECK-LABEL: vp_bitreverse_nxv1i9: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vsrl.vi v9, v8, 8, v0.t ; CHECK-NEXT: lui a0, 1 ; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t ; CHECK-NEXT: addi a0, a0, -241 ; CHECK-NEXT: vor.vv v8, v8, v9, v0.t ; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: lui a0, 3 ; CHECK-NEXT: addi a0, a0, 819 ; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t ; CHECK-NEXT: vor.vv v8, v9, v8, v0.t ; CHECK-NEXT: vsrl.vi v9, v8, 2, v0.t ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: lui a0, 5 ; CHECK-NEXT: addi a0, a0, 1365 ; CHECK-NEXT: vsll.vi v8, v8, 2, v0.t ; CHECK-NEXT: vor.vv v8, v9, v8, v0.t ; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t ; CHECK-NEXT: vand.vx v9, v9, a0, v0.t ; CHECK-NEXT: vand.vx v8, v8, a0, v0.t ; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t ; CHECK-NEXT: vor.vv v8, v9, v8, v0.t ; CHECK-NEXT: vsrl.vi v8, v8, 7, v0.t ; CHECK-NEXT: ret ; ; CHECK-ZVBB-LABEL: vp_bitreverse_nxv1i9: ; CHECK-ZVBB: # %bb.0: ; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t ; CHECK-ZVBB-NEXT: vsrl.vi v8, v8, 7, v0.t ; CHECK-ZVBB-NEXT: ret %v = call <vscale x 1 x i9> @llvm.vp.bitreverse.nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 %evl) ret <vscale x 1 x i9> %v }
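; NOTE (editorial annotation, not part of the generated checks): without
; Zvbb, the expanded sequences above implement the classic bswap + SWAR
; bit reversal. Bytes are reversed first (shifts by 8/24/40/56 with masks
; built from lui/addi(w) of 16, 255 and 4080), then bits within each byte
; are swapped in three rounds using the 0x0F0F..., 0x3333... and 0x5555...
; masks (lui 61681/209715/349525 plus addi -241/819/1365). A minimal
; scalar sketch of the per-element computation, in illustrative C that is
; not emitted by this test:
;
;   #include <stdint.h>
;
;   uint64_t bitreverse64(uint64_t x) {
;     x = __builtin_bswap64(x);  // reverse byte order
;     // swap nibbles, then 2-bit pairs, then single bits
;     x = ((x >> 4) & 0x0F0F0F0F0F0F0F0Full) | ((x & 0x0F0F0F0F0F0F0F0Full) << 4);
;     x = ((x >> 2) & 0x3333333333333333ull) | ((x & 0x3333333333333333ull) << 2);
;     x = ((x >> 1) & 0x5555555555555555ull) | ((x & 0x5555555555555555ull) << 1);
;     return x;
;   }
;
; With +zvbb the whole expansion collapses to a single vbrev.v.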