; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs | FileCheck %s

declare <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(iXLen);

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

; Use unmasked instruction because the mask operand is allone mask
define <vscale x 1 x i8> @test0(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: test0:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %allone = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
    iXLen %2);
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %allone,
    iXLen %2, iXLen 1)

  ret <vscale x 1 x i8> %a
}

; Use an unmasked TAIL_AGNOSTIC instruction if the tie operand is IMPLICIT_DEF
define <vscale x 1 x i8> @test1(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %allone = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
    iXLen %2);
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> undef,
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %allone,
    iXLen %2, iXLen 0)

  ret <vscale x 1 x i8> %a
}

; Use an unmasked TU instruction because of the policy operand
define <vscale x 1 x i8> @test2(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %allone = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
    iXLen %3);
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %allone,
    iXLen %3, iXLen 0)

  ret <vscale x 1 x i8> %a
}

; Merge operand is dropped because of the policy operand
define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vadd.vv v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %allone = call <vscale x 1 x i1> @llvm.riscv.vmset.nxv1i1(
    iXLen %3);
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %allone,
    iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}