; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64

;; Tests RISC-V codegen for the `shadowcallstack` function attribute: the
;; return address is pushed onto / popped from the shadow call stack
;; (addressed through gp) around the function body. Slots are 4 bytes
;; (sw/lw) on RV32 and 8 bytes (sd/ld) on RV64.

;; A leaf function makes no calls, so no shadow-call-stack code is emitted.
define void @f1() shadowcallstack {
; RV32-LABEL: f1:
; RV32: # %bb.0:
; RV32-NEXT: ret
;
; RV64-LABEL: f1:
; RV64: # %bb.0:
; RV64-NEXT: ret
  ret void
}

declare void @foo()

;; A function whose only call is a tail call never spills ra, so no
;; shadow-call-stack push/pop is needed either.
define void @f2() shadowcallstack {
; RV32-LABEL: f2:
; RV32: # %bb.0:
; RV32-NEXT: tail foo
;
; RV64-LABEL: f2:
; RV64: # %bb.0:
; RV64-NEXT: tail foo
  tail call void @foo()
  ret void
}

declare i32 @bar()

;; A single non-tail call: the prologue bumps gp by one slot and stores ra
;; below the new top (`sw ra, -4(gp)` / `sd ra, -8(gp)`); the epilogue
;; reloads ra from the shadow stack and restores gp. `.cfi_escape` /
;; `.cfi_restore gp` describe the shadow-stack save to the unwinder.
;; (%res1 is intentionally dead; only %res is returned.)
define i32 @f3() shadowcallstack {
; RV32-LABEL: f3:
; RV32: # %bb.0:
; RV32-NEXT: addi gp, gp, 4
; RV32-NEXT: sw ra, -4(gp)
; RV32-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call bar
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: lw ra, -4(gp)
; RV32-NEXT: addi gp, gp, -4
; RV32-NEXT: .cfi_restore gp
; RV32-NEXT: ret
;
; RV64-LABEL: f3:
; RV64: # %bb.0:
; RV64-NEXT: addi gp, gp, 8
; RV64-NEXT: sd ra, -8(gp)
; RV64-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call bar
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ld ra, -8(gp)
; RV64-NEXT: addi gp, gp, -8
; RV64-NEXT: .cfi_restore gp
; RV64-NEXT: ret
  %res = call i32 @bar()
  %res1 = add i32 %res, 1
  ret i32 %res
}

;; Multiple calls with values live across them: callee-saved s0-s2 are
;; spilled to the regular stack as usual, but ra is still pushed onto the
;; shadow call stack exactly once in the prologue and popped once in the
;; epilogue.
define i32 @f4() shadowcallstack {
; RV32-LABEL: f4:
; RV32: # %bb.0:
; RV32-NEXT: addi gp, gp, 4
; RV32-NEXT: sw ra, -4(gp)
; RV32-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x7c #
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32-NEXT: sw s2, 0(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: .cfi_offset s0, -8
; RV32-NEXT: .cfi_offset s1, -12
; RV32-NEXT: .cfi_offset s2, -16
; RV32-NEXT: call bar
; RV32-NEXT: mv s0, a0
; RV32-NEXT: call bar
; RV32-NEXT: mv s1, a0
; RV32-NEXT: call bar
; RV32-NEXT: mv s2, a0
; RV32-NEXT: call bar
; RV32-NEXT: add s0, s0, s1
; RV32-NEXT: add a0, s2, a0
; RV32-NEXT: add a0, s0, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32-NEXT: lw s2, 0(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: lw ra, -4(gp)
; RV32-NEXT: addi gp, gp, -4
; RV32-NEXT: .cfi_restore gp
; RV32-NEXT: ret
;
; RV64-LABEL: f4:
; RV64: # %bb.0:
; RV64-NEXT: addi gp, gp, 8
; RV64-NEXT: sd ra, -8(gp)
; RV64-NEXT: .cfi_escape 0x16, 0x03, 0x02, 0x73, 0x78 #
; RV64-NEXT: addi sp, sp, -32
; RV64-NEXT: .cfi_def_cfa_offset 32
; RV64-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: sd s2, 0(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: .cfi_offset s0, -16
; RV64-NEXT: .cfi_offset s1, -24
; RV64-NEXT: .cfi_offset s2, -32
; RV64-NEXT: call bar
; RV64-NEXT: mv s0, a0
; RV64-NEXT: call bar
; RV64-NEXT: mv s1, a0
; RV64-NEXT: call bar
; RV64-NEXT: mv s2, a0
; RV64-NEXT: call bar
; RV64-NEXT: add s0, s0, s1
; RV64-NEXT: add a0, s2, a0
; RV64-NEXT: addw a0, s0, a0
; RV64-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: ld s2, 0(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 32
; RV64-NEXT: ld ra, -8(gp)
; RV64-NEXT: addi gp, gp, -8
; RV64-NEXT: .cfi_restore gp
; RV64-NEXT: ret
  %res1 = call i32 @bar()
  %res2 = call i32 @bar()
  %res3 = call i32 @bar()
  %res4 = call i32 @bar()
  %res12 = add i32 %res1, %res2
  %res34 = add i32 %res3, %res4
  %res1234 = add i32 %res12, %res34
  ret i32 %res1234
}

;; Same as f3 but with `nounwind`: the shadow-call-stack push/pop is still
;; emitted, but no CFI directives are.
define i32 @f5() shadowcallstack nounwind {
; RV32-LABEL: f5:
; RV32: # %bb.0:
; RV32-NEXT: addi gp, gp, 4
; RV32-NEXT: sw ra, -4(gp)
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: call bar
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: lw ra, -4(gp)
; RV32-NEXT: addi gp, gp, -4
; RV32-NEXT: ret
;
; RV64-LABEL: f5:
; RV64: # %bb.0:
; RV64-NEXT: addi gp, gp, 8
; RV64-NEXT: sd ra, -8(gp)
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: call bar
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ld ra, -8(gp)
; RV64-NEXT: addi gp, gp, -8
; RV64-NEXT: ret
  %res = call i32 @bar()
  %res1 = add i32 %res, 1
  ret i32 %res
}