; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -O3 -mattr=+v -lsr-drop-solution | FileCheck --check-prefix=CHECK %s

target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
target triple = "riscv64-unknown-linux-gnu"

; Vector-length-strided copy from %a1 to %a0 of %a2 bytes, driven by
; explicit vsetvli/vle8/vse8 intrinsics.  Exercises LSR with
; -lsr-drop-solution on the do.body loop; the CHECK lines pin the
; generated RVV assembly.
define ptr @foo(ptr %a0, ptr %a1, i64 %a2) {
; CHECK-LABEL: foo:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a4, a2, e8, m8, ta, ma
; CHECK-NEXT:    bne a4, a2, .LBB0_2
; CHECK-NEXT:  # %bb.1:
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
; CHECK-NEXT:  .LBB0_2: # %if.then
; CHECK-NEXT:    add a2, a0, a2
; CHECK-NEXT:    sub a5, a2, a4
; CHECK-NEXT:    mv a3, a0
; CHECK-NEXT:  .LBB0_3: # %do.body
; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    add a3, a3, a4
; CHECK-NEXT:    add a1, a1, a4
; CHECK-NEXT:    bltu a3, a5, .LBB0_3
; CHECK-NEXT:  # %bb.4: # %do.end
; CHECK-NEXT:    sub a2, a2, a3
; CHECK-NEXT:    vsetvli a2, a2, e8, m8, ta, ma
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a3)
; CHECK-NEXT:    ret
entry:
  ; Request VL for %a2 elements at SEW=e8 (sew arg 0), LMUL=m8 (lmul arg 3).
  %0 = ptrtoint ptr %a0 to i64
  %1 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %a2, i64 0, i64 3)
  ; If one VL covers the whole count, skip the loop and do a single copy.
  %cmp.not = icmp eq i64 %1, %a2
  br i1 %cmp.not, label %if.end, label %if.then

if.then:                                          ; preds = %entry
  ; %add = one-past-the-end address; %sub = last loop-iteration bound.
  %add = add i64 %0, %a2
  %sub = sub i64 %add, %1
  br label %do.body

do.body:                                          ; preds = %do.body, %if.then
  ; Copy %1 bytes per iteration, advancing both pointers by VL (%1).
  %a3.0 = phi i64 [ %0, %if.then ], [ %add1, %do.body ]
  %a1.addr.0 = phi ptr [ %a1, %if.then ], [ %add.ptr, %do.body ]
  %2 = tail call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr %a1.addr.0, i64 %1)
  %3 = inttoptr i64 %a3.0 to ptr
  tail call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> %2, ptr %3, i64 %1)
  %add1 = add i64 %a3.0, %1
  %add.ptr = getelementptr i8, ptr %a1.addr.0, i64 %1
  %cmp2 = icmp ugt i64 %sub, %add1
  br i1 %cmp2, label %do.body, label %do.end

do.end:                                           ; preds = %do.body
  ; Recompute VL for the remaining tail (%sub4 bytes).
  %sub4 = sub i64 %add, %add1
  %4 = tail call i64 @llvm.riscv.vsetvli.i64(i64 %sub4, i64 0, i64 3)
  br label %if.end

if.end:                                           ; preds = %do.end, %entry
  ; Final (or only) copy of %t0.0 bytes; returns the original destination.
  %a3.1 = phi i64 [ %add1, %do.end ], [ %0, %entry ]
  %t0.0 = phi i64 [ %4, %do.end ], [ %a2, %entry ]
  %a1.addr.1 = phi ptr [ %add.ptr, %do.end ], [ %a1, %entry ]
  %5 = tail call <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8> undef, ptr %a1.addr.1, i64 %t0.0)
  %6 = inttoptr i64 %a3.1 to ptr
  tail call void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8> %5, ptr %6, i64 %t0.0)
  ret ptr %a0
}

declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)

declare <vscale x 64 x i8> @llvm.riscv.vle.nxv64i8.i64(<vscale x 64 x i8>, ptr nocapture, i64)

declare void @llvm.riscv.vse.nxv64i8.i64(<vscale x 64 x i8>, ptr nocapture, i64)