; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

declare void @notdead(ptr)
declare ptr @llvm.frameaddress(i32)
declare ptr @llvm.returnaddress(i32)

define ptr @test_frameaddress_0() nounwind {
; RV32I-LABEL: test_frameaddress_0:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    addi s0, sp, 16
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test_frameaddress_0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi s0, sp, 16
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call ptr @llvm.frameaddress(i32 0)
  ret ptr %1
}

define ptr @test_frameaddress_2() nounwind {
; RV32I-LABEL: test_frameaddress_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    addi s0, sp, 16
; RV32I-NEXT:    lw a0, -8(s0)
; RV32I-NEXT:    lw a0, -8(a0)
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test_frameaddress_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi s0, sp, 16
; RV64I-NEXT:    ld a0, -16(s0)
; RV64I-NEXT:    ld a0, -16(a0)
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call ptr @llvm.frameaddress(i32 2)
  ret ptr %1
}

define ptr @test_frameaddress_3_alloca() nounwind {
; RV32I-LABEL: test_frameaddress_3_alloca:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -112
; RV32I-NEXT:    sw ra, 108(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 104(sp) # 4-byte Folded Spill
; RV32I-NEXT:    addi s0, sp, 112
; RV32I-NEXT:    addi a0, s0, -108
; RV32I-NEXT:    call notdead
; RV32I-NEXT:    lw a0, -8(s0)
; RV32I-NEXT:    lw a0, -8(a0)
; RV32I-NEXT:    lw a0, -8(a0)
; RV32I-NEXT:    lw ra, 108(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 104(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 112
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test_frameaddress_3_alloca:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -128
; RV64I-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi s0, sp, 128
; RV64I-NEXT:    addi a0, s0, -116
; RV64I-NEXT:    call notdead
; RV64I-NEXT:    ld a0, -16(s0)
; RV64I-NEXT:    ld a0, -16(a0)
; RV64I-NEXT:    ld a0, -16(a0)
; RV64I-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 128
; RV64I-NEXT:    ret
  %1 = alloca [100 x i8]
  call void @notdead(ptr %1)
  %2 = call ptr @llvm.frameaddress(i32 3)
  ret ptr %2
}

define ptr @test_returnaddress_0() nounwind {
; RV32I-LABEL: test_returnaddress_0:
; RV32I:       # %bb.0:
; RV32I-NEXT:    mv a0, ra
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test_returnaddress_0:
; RV64I:       # %bb.0:
; RV64I-NEXT:    mv a0, ra
; RV64I-NEXT:    ret
  %1 = call ptr @llvm.returnaddress(i32 0)
  ret ptr %1
}

define ptr @test_returnaddress_2() nounwind {
; RV32I-LABEL: test_returnaddress_2:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    addi s0, sp, 16
; RV32I-NEXT:    lw a0, -8(s0)
; RV32I-NEXT:    lw a0, -8(a0)
; RV32I-NEXT:    lw a0, -4(a0)
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: test_returnaddress_2:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    addi s0, sp, 16
; RV64I-NEXT:    ld a0, -16(s0)
; RV64I-NEXT:    ld a0, -16(a0)
; RV64I-NEXT:    ld a0, -8(a0)
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = call ptr @llvm.returnaddress(i32 2)
  ret ptr %1
}