1; REQUIRES: asserts 2; RUN: llc < %s -O3 -mattr=+v -debug -lsr-drop-solution 2>&1 | FileCheck --check-prefix=DEBUG %s 3; RUN: llc < %s -O3 -mattr=+v -debug 2>&1 | FileCheck --check-prefix=DEBUG2 %s 4 5target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128" 6target triple = "riscv64-unknown-linux-gnu" 7 8define ptr @foo(ptr %a0, ptr %a1, i64 %a2) { 9;DEBUG: The baseline solution requires 2 instructions 4 regs, with addrec cost 2, plus 3 setup cost 10;DEBUG: The chosen solution requires 3 instructions 6 regs, with addrec cost 1, plus 2 base adds, plus 5 setup cost 11;DEBUG: Baseline is more profitable than chosen solution, dropping LSR solution. 12 13;DEBUG2: Baseline is more profitable than chosen solution, add option 'lsr-drop-solution' to drop LSR solution. 14entry: 15 %0 = ptrtoint ptr %a0 to i64 16 %1 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %a2, i64 0, i64 3) 17 %cmp.not = icmp eq i64 %1, %a2 18 br i1 %cmp.not, label %if.end, label %if.then 19 20if.then: ; preds = %entry 21 %add = add i64 %0, %a2 22 %sub = sub i64 %add, %1 23 br label %do.body 24 25do.body: ; preds = %do.body, %if.then 26 %a3.0 = phi i64 [ %0, %if.then ], [ %add1, %do.body ] 27 %a1.addr.0 = phi ptr [ %a1, %if.then ], [ %add.ptr, %do.body ] 28 %2 = tail call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr %a1.addr.0, i64 %1) 29 %3 = inttoptr i64 %a3.0 to ptr 30 tail call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> %2, ptr %3, i64 %1) 31 %add1 = add i64 %a3.0, %1 32 %add.ptr = getelementptr i8, ptr %a1.addr.0, i64 %1 33 %cmp2 = icmp ugt i64 %sub, %add1 34 br i1 %cmp2, label %do.body, label %do.end 35 36do.end: ; preds = %do.body 37 %sub4 = sub i64 %add, %add1 38 %4 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %sub4, i64 0, i64 3) 39 br label %if.end 40 41if.end: ; preds = %do.end, %entry 42 %a3.1 = phi i64 [ %add1, %do.end ], [ %0, %entry ] 43 %t0.0 = phi i64 [ %4, %do.end ], [ %a2, %entry ] 44 %a1.addr.1 = phi ptr [ %add.ptr, %do.end ], [ %a1, %entry ] 45 %5 = tail call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr %a1.addr.1, i64 %t0.0) 46 %6 = inttoptr i64 %a3.1 to ptr 47 tail call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> %5, ptr %6, i64 %t0.0) 48 ret ptr %a0 49} 50 51declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg) 52 53declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8>, ptr nocapture, i64) 54 55declare void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8>, ptr nocapture, i64) 56