; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -O2 < %s \
; RUN:   | FileCheck %s -check-prefix=RV64IV

; The frame contains only fixed-size objects, so the prologue adjusts sp by a
; compile-time constant and the vector load uses a static offset from sp.
define <vscale x 1 x i64> @access_fixed_object(i64 *%val) {
; RV64IV-LABEL: access_fixed_object:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -528
; RV64IV-NEXT:    .cfi_def_cfa_offset 528
; RV64IV-NEXT:    addi a1, sp, 8
; RV64IV-NEXT:    vl1re64.v v8, (a1)
; RV64IV-NEXT:    ld a1, 520(sp)
; RV64IV-NEXT:    sd a1, 0(a0)
; RV64IV-NEXT:    addi sp, sp, 528
; RV64IV-NEXT:    ret
  %local = alloca i64
  %array = alloca [64 x i64]
  %vptr = bitcast [64 x i64]* %array to <vscale x 1 x i64>*
  %v = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vptr
  %len = load i64, i64* %local
  store i64 %len, i64* %val
  ret <vscale x 1 x i64> %v
}

declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

; The frame holds both fixed-size objects and a scalable vector object, so the
; total frame size is no longer a compile-time constant: the prologue must
; additionally reserve vlenb bytes (read via csrr) for the RVV stack area.
define <vscale x 1 x i64> @access_fixed_and_vector_objects(i64 *%val) {
; RV64IV-LABEL: access_fixed_and_vector_objects:
; RV64IV:       # %bb.0:
; RV64IV-NEXT:    addi sp, sp, -544
; RV64IV-NEXT:    .cfi_def_cfa_offset 544
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    sub sp, sp, a0
; RV64IV-NEXT:    addi a0, sp, 24
; RV64IV-NEXT:    vl1re64.v v8, (a0)
; RV64IV-NEXT:    ld a0, 536(sp)
; RV64IV-NEXT:    addi a1, sp, 544
; RV64IV-NEXT:    vl1re64.v v9, (a1)
; RV64IV-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; RV64IV-NEXT:    vadd.vv v8, v8, v9
; RV64IV-NEXT:    csrr a0, vlenb
; RV64IV-NEXT:    add sp, sp, a0
; RV64IV-NEXT:    addi sp, sp, 544
; RV64IV-NEXT:    ret
  %local = alloca i64
  %vector = alloca <vscale x 1 x i64>
  %array = alloca [64 x i64]
  %vptr = bitcast [64 x i64]* %array to <vscale x 1 x i64>*
  %v1 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vptr
  %v2 = load <vscale x 1 x i64>, <vscale x 1 x i64>* %vector
  %len = load i64, i64* %local

  %a = call <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %v1,
    <vscale x 1 x i64> %v2,
    i64 %len)

  ret <vscale x 1 x i64> %a
}