; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; The test cases check that we use the si versions of the conversions from
; float (i.e. the 32-bit libcalls __fixunssfsi/__fixsfsi, not the di/64-bit
; variants), even though this is a 64-bit target.

declare i32 @llvm.experimental.constrained.fptoui.i32.f32(float, metadata)
declare i32 @llvm.experimental.constrained.fptosi.i32.f32(float, metadata)

; Strict float -> unsigned i32: must lower to the __fixunssfsi libcall.
define i32 @strict_fp32_to_ui32(float %a) nounwind strictfp {
; RV64I-LABEL: strict_fp32_to_ui32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixunssfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptoui.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %conv
}

; Strict float -> signed i32: must lower to the __fixsfsi libcall.
define i32 @strict_fp32_to_si32(float %a) nounwind strictfp {
; RV64I-LABEL: strict_fp32_to_si32:
; RV64I:       # %bb.0: # %entry
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __fixsfsi
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
entry:
  %conv = tail call i32 @llvm.experimental.constrained.fptosi.i32.f32(float %a, metadata !"fpexcept.strict")
  ret i32 %conv
}