; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I-FPELIM %s
; RUN: llc -mtriple=riscv32 -verify-machineinstrs -frame-pointer=all < %s \
; RUN: | FileCheck -check-prefix=RV32I-WITHFP %s

; TODO: the quality of the generated code is poor

; A stack frame of 0x12345678 (305419896) bytes — rounded up to 305419904 for
; 16-byte alignment, as seen in .cfi_def_cfa_offset below.  The adjustment is
; far outside the 12-bit immediate range of addi, so it must be materialized
; into a scratch register with lui+addi before sub/add on sp.
define void @test() {
; RV32I-FPELIM-LABEL: test:
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: lui a0, 74565
; RV32I-FPELIM-NEXT: addi a0, a0, 1664
; RV32I-FPELIM-NEXT: sub sp, sp, a0
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 305419904
; RV32I-FPELIM-NEXT: lui a0, 74565
; RV32I-FPELIM-NEXT: addi a0, a0, 1664
; RV32I-FPELIM-NEXT: add sp, sp, a0
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: test:
; RV32I-WITHFP: # %bb.0:
; RV32I-WITHFP-NEXT: addi sp, sp, -2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 2032
; RV32I-WITHFP-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: .cfi_offset ra, -4
; RV32I-WITHFP-NEXT: .cfi_offset s0, -8
; RV32I-WITHFP-NEXT: addi s0, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa s0, 0
; RV32I-WITHFP-NEXT: lui a0, 74565
; RV32I-WITHFP-NEXT: addi a0, a0, -352
; RV32I-WITHFP-NEXT: sub sp, sp, a0
; RV32I-WITHFP-NEXT: lui a0, 74565
; RV32I-WITHFP-NEXT: addi a0, a0, -352
; RV32I-WITHFP-NEXT: add sp, sp, a0
; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: .cfi_restore ra
; RV32I-WITHFP-NEXT: .cfi_restore s0
; RV32I-WITHFP-NEXT: addi sp, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITHFP-NEXT: ret
  %tmp = alloca [ 305419896 x i8 ] , align 4
  ret void
}

; This test case artificially produces register pressure which should force
; use of the emergency spill slot.
; The first inline asm defines 15 i32 results that all stay live across the
; volatile store (the second asm consumes them), occupying every allocatable
; GPR.  The store's frame offset (80000 * 4 = 320000 bytes) is outside the
; simm12 range — note the lui a1, 78 materialization in the checks — so
; eliminateFrameIndex needs a scratch register; with none free it must fall
; back to the emergency spill slot reserved in the frame.
define void @test_emergency_spill_slot(i32 %a) {
; RV32I-FPELIM-LABEL: test_emergency_spill_slot:
; RV32I-FPELIM: # %bb.0:
; RV32I-FPELIM-NEXT: addi sp, sp, -2032
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 2032
; RV32I-FPELIM-NEXT: sw s0, 2028(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: sw s1, 2024(sp) # 4-byte Folded Spill
; RV32I-FPELIM-NEXT: .cfi_offset s0, -4
; RV32I-FPELIM-NEXT: .cfi_offset s1, -8
; RV32I-FPELIM-NEXT: lui a1, 97
; RV32I-FPELIM-NEXT: addi a1, a1, 672
; RV32I-FPELIM-NEXT: sub sp, sp, a1
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 400016
; RV32I-FPELIM-NEXT: lui a1, 78
; RV32I-FPELIM-NEXT: addi a2, sp, 8
; RV32I-FPELIM-NEXT: add a1, a2, a1
; RV32I-FPELIM-NEXT: #APP
; RV32I-FPELIM-NEXT: nop
; RV32I-FPELIM-NEXT: #NO_APP
; RV32I-FPELIM-NEXT: sw a0, 512(a1)
; RV32I-FPELIM-NEXT: #APP
; RV32I-FPELIM-NEXT: nop
; RV32I-FPELIM-NEXT: #NO_APP
; RV32I-FPELIM-NEXT: lui a0, 97
; RV32I-FPELIM-NEXT: addi a0, a0, 672
; RV32I-FPELIM-NEXT: add sp, sp, a0
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 2032
; RV32I-FPELIM-NEXT: lw s0, 2028(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: lw s1, 2024(sp) # 4-byte Folded Reload
; RV32I-FPELIM-NEXT: .cfi_restore s0
; RV32I-FPELIM-NEXT: .cfi_restore s1
; RV32I-FPELIM-NEXT: addi sp, sp, 2032
; RV32I-FPELIM-NEXT: .cfi_def_cfa_offset 0
; RV32I-FPELIM-NEXT: ret
;
; RV32I-WITHFP-LABEL: test_emergency_spill_slot:
; RV32I-WITHFP: # %bb.0:
; RV32I-WITHFP-NEXT: addi sp, sp, -2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 2032
; RV32I-WITHFP-NEXT: sw ra, 2028(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s0, 2024(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s1, 2020(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: sw s2, 2016(sp) # 4-byte Folded Spill
; RV32I-WITHFP-NEXT: .cfi_offset ra, -4
; RV32I-WITHFP-NEXT: .cfi_offset s0, -8
; RV32I-WITHFP-NEXT: .cfi_offset s1, -12
; RV32I-WITHFP-NEXT: .cfi_offset s2, -16
; RV32I-WITHFP-NEXT: addi s0, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa s0, 0
; RV32I-WITHFP-NEXT: lui a1, 97
; RV32I-WITHFP-NEXT: addi a1, a1, 688
; RV32I-WITHFP-NEXT: sub sp, sp, a1
; RV32I-WITHFP-NEXT: lui a1, 78
; RV32I-WITHFP-NEXT: lui a2, 98
; RV32I-WITHFP-NEXT: addi a2, a2, -1388
; RV32I-WITHFP-NEXT: sub a2, s0, a2
; RV32I-WITHFP-NEXT: add a1, a2, a1
; RV32I-WITHFP-NEXT: #APP
; RV32I-WITHFP-NEXT: nop
; RV32I-WITHFP-NEXT: #NO_APP
; RV32I-WITHFP-NEXT: sw a0, 512(a1)
; RV32I-WITHFP-NEXT: #APP
; RV32I-WITHFP-NEXT: nop
; RV32I-WITHFP-NEXT: #NO_APP
; RV32I-WITHFP-NEXT: lui a0, 97
; RV32I-WITHFP-NEXT: addi a0, a0, 688
; RV32I-WITHFP-NEXT: add sp, sp, a0
; RV32I-WITHFP-NEXT: .cfi_def_cfa sp, 2032
; RV32I-WITHFP-NEXT: lw ra, 2028(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s0, 2024(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s1, 2020(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: lw s2, 2016(sp) # 4-byte Folded Reload
; RV32I-WITHFP-NEXT: .cfi_restore ra
; RV32I-WITHFP-NEXT: .cfi_restore s0
; RV32I-WITHFP-NEXT: .cfi_restore s1
; RV32I-WITHFP-NEXT: .cfi_restore s2
; RV32I-WITHFP-NEXT: addi sp, sp, 2032
; RV32I-WITHFP-NEXT: .cfi_def_cfa_offset 0
; RV32I-WITHFP-NEXT: ret
  %data = alloca [ 100000 x i32 ] , align 4
  %ptr = getelementptr inbounds [100000 x i32], ptr %data, i32 0, i32 80000
  %1 = tail call { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } asm sideeffect "nop", "=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r,=r"()
  %asmresult0 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 0
  %asmresult1 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 1
  %asmresult2 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 2
  %asmresult3 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 3
  %asmresult4 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 4
  %asmresult5 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 5
  %asmresult6 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 6
  %asmresult7 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 7
  %asmresult8 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 8
  %asmresult9 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 9
  %asmresult10 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 10
  %asmresult11 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 11
  %asmresult12 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 12
  %asmresult13 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 13
  %asmresult14 = extractvalue { i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32, i32 } %1, 14
  store volatile i32 %a, ptr %ptr
  tail call void asm sideeffect "nop", "r,r,r,r,r,r,r,r,r,r,r,r,r,r,r"(i32 %asmresult0, i32 %asmresult1, i32 %asmresult2, i32 %asmresult3, i32 %asmresult4, i32 %asmresult5, i32 %asmresult6, i32 %asmresult7, i32 %asmresult8, i32 %asmresult9, i32 %asmresult10, i32 %asmresult11, i32 %asmresult12, i32 %asmresult13, i32 %asmresult14)
  ret void
}