; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IV

define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
; RV64IV-LABEL: access_fixed_object:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    addi a1, sp, 8
; RV64IV-NEXT:    vl1re64.v v8, (a1)
; RV64IV-NEXT:    ld a1, 520(sp)
; RV64IV-NEXT:    sd a1, 0(a0)
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    .cfi_def_cfa_offset 0
; RV64IV-NEXT:    ret
  %local = alloca i64
  %array = alloca [64 x i64]
  %v = load <vscale x 1 x i64>, ptr %array
  %len = load i64, ptr %local
  store i64 %len, ptr %val
  ret <vscale x 1 x i64> %v
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-LABEL: access_fixed_and_vector_objects:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
; RV64IV-NEXT:    addi a0, sp, 8
; RV64IV-NEXT:    vl1re64.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 528
; RV64IV-NEXT:    vl1re64.v v9, (a0)
; RV64IV-NEXT:    ld a0, 520(sp)
; RV64IV-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV64IV-NEXT:    vadd.vv v8, v8, v9
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    .cfi_def_cfa sp, 528
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    .cfi_def_cfa_offset 0
; RV64IV-NEXT:    ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %v1 = load <vscale x 1 x i64>, ptr %array
  %v2 = load <vscale x 1 x i64>, ptr %vector
  %len = load i64, ptr %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @probe_fixed_and_vector_objects(ptr %val, <vscale x 1 x i64> %dummy) "probe-stack"="inline-asm" {
; RV64IV-LABEL: probe_fixed_and_vector_objects:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    csrr t1, vlenb
; RV64IV-NEXT:    .cfi_def_cfa t1, -8
; RV64IV-NEXT:    lui t2, 1
; RV64IV-NEXT:  .LBB2_1: # =>This Inner Loop Header: Depth=1
; RV64IV-NEXT:    sub sp, sp, t2
; RV64IV-NEXT:    sd zero, 0(sp)
; RV64IV-NEXT:    sub t1, t1, t2
; RV64IV-NEXT:    bge t1, t2, .LBB2_1
; RV64IV-NEXT:  # %bb.2:
; RV64IV-NEXT:    .cfi_def_cfa_register sp
; RV64IV-NEXT:    sub sp, sp, t1
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x01, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 1 * vlenb
; RV64IV-NEXT:    addi a0, sp, 8
; RV64IV-NEXT:    vl1re64.v v9, (a0)
; RV64IV-NEXT:    addi a0, sp, 528
; RV64IV-NEXT:    vl1re64.v v10, (a0)
; RV64IV-NEXT:    ld a0, 520(sp)
; RV64IV-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; RV64IV-NEXT:    vadd.vv v8, v9, v10
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    .cfi_def_cfa sp, 528
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    .cfi_def_cfa_offset 0
; RV64IV-NEXT:    ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %v1 = load <vscale x 1 x i64>, ptr %array
  %v2 = load <vscale x 1 x i64>, ptr %vector
  %len = load i64, ptr %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %dummy,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}