; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN:   -verify-machineinstrs | FileCheck %s

; Tests for the llvm.riscv.vmset intrinsic at every mask LMUL (mf8 .. m8).
; Each function takes the VL operand (iXLen, rewritten to i32/i64 by the RUN
; lines via sed) and must lower to a single vsetvli + vmset.m v0.

declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
  iXLen);

define <vscale x 1 x i1> @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
    iXLen %0)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
  iXLen);

define <vscale x 2 x i1> @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmset.nxv2i1(
    iXLen %0)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
  iXLen);

define <vscale x 4 x i1> @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmset.nxv4i1(
    iXLen %0)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
  iXLen);

define <vscale x 8 x i1> @intrinsic_vmset_m_pseudo_nxv8i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv8i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmset.nxv8i1(
    iXLen %0)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
  iXLen);

define <vscale x 16 x i1> @intrinsic_vmset_m_pseudo_nxv16i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv16i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmset.nxv16i1(
    iXLen %0)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
  iXLen);

define <vscale x 32 x i1> @intrinsic_vmset_m_pseudo_nxv32i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv32i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmset.nxv32i1(
    iXLen %0)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
  iXLen);

define <vscale x 64 x i1> @intrinsic_vmset_m_pseudo_nxv64i1(iXLen %0) nounwind {
; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv64i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmset.m v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmset.nxv64i1(
    iXLen %0)

  ret <vscale x 64 x i1> %a
}