; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IV

; Stack frame with only fixed-size objects: the 64 x i64 array and the i64
; local live at constant offsets from sp, so no vlenb-scaled stack
; adjustment is needed.
define <vscale x 1 x i64> @access_fixed_object(ptr %val) {
; RV64IV-LABEL: access_fixed_object:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    addi a1, sp, 8
; RV64IV-NEXT:    vl1re64.v v8, (a1)
; RV64IV-NEXT:    ld a1, 520(sp)
; RV64IV-NEXT:    sd a1, 0(a0)
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    ret
  %local = alloca i64
  %array = alloca [64 x i64]
  %v = load <vscale x 1 x i64>, ptr %array
  %len = load i64, ptr %local
  store i64 %len, ptr %val
  ret <vscale x 1 x i64> %v
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

; Mixing fixed-size objects with a scalable vector object: the prologue must
; additionally reserve a multiple of vlenb, so it reads the vlenb CSR and the
; CFA is described with a DWARF expression (.cfi_escape).
define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-LABEL: access_fixed_and_vector_objects:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x90, 0x04, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 528 + 2 * vlenb
; RV64IV-NEXT:    addi a0, sp, 8
; RV64IV-NEXT:    vl1re64.v v8, (a0)
; RV64IV-NEXT:    addi a0, sp, 528
; RV64IV-NEXT:    ld a1, 520(sp)
; RV64IV-NEXT:    vl1re64.v v9, (a0)
; RV64IV-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64IV-NEXT:    vadd.vv v8, v8, v9
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    slli a0, a0, 1
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %v1 = load <vscale x 1 x i64>, ptr %array
  %v2 = load <vscale x 1 x i64>, ptr %vector
  %len = load i64, ptr %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}