; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=R64

define float @maxnum_f32(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    call fmaxf
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: maxnum_f32:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -16
; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    call fmaxf
; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 16
; R64-NEXT:    ret
  %r = call float @llvm.maxnum.f32(float %x, float %y)
  ret float %r
}

define float @maxnum_f32_fast(float %x, float %y) nounwind {
; R32-LABEL: maxnum_f32_fast:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT:    mv s1, a1
; R32-NEXT:    mv s0, a0
; R32-NEXT:    call __gtsf2
; R32-NEXT:    bgtz a0, .LBB1_2
; R32-NEXT:  # %bb.1:
; R32-NEXT:    mv s0, s1
; R32-NEXT:  .LBB1_2:
; R32-NEXT:    mv a0, s0
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: maxnum_f32_fast:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -32
; R64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    mv s1, a1
; R64-NEXT:    mv s0, a0
; R64-NEXT:    call __gtsf2
; R64-NEXT:    bgtz a0, .LBB1_2
; R64-NEXT:  # %bb.1:
; R64-NEXT:    mv s0, s1
; R64-NEXT:  .LBB1_2:
; R64-NEXT:    mv a0, s0
; R64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 32
; R64-NEXT:    ret
  %r = call fast float @llvm.maxnum.f32(float %x, float %y)
  ret float %r
}

define double @maxnum_f64(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    call fmax
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: maxnum_f64:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -16
; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    call fmax
; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 16
; R64-NEXT:    ret
  %r = call double @llvm.maxnum.f64(double %x, double %y)
  ret double %r
}

define double @maxnum_f64_nnan(double %x, double %y) nounwind {
; R32-LABEL: maxnum_f64_nnan:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -32
; R32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    mv s1, a3
; R32-NEXT:    mv s2, a2
; R32-NEXT:    mv s0, a1
; R32-NEXT:    mv s3, a0
; R32-NEXT:    call __gtdf2
; R32-NEXT:    mv a1, a0
; R32-NEXT:    mv a0, s3
; R32-NEXT:    bgtz a1, .LBB3_2
; R32-NEXT:  # %bb.1:
; R32-NEXT:    mv s3, s2
; R32-NEXT:  .LBB3_2:
; R32-NEXT:    mv a1, s0
; R32-NEXT:    mv a2, s2
; R32-NEXT:    mv a3, s1
; R32-NEXT:    call __gtdf2
; R32-NEXT:    bgtz a0, .LBB3_4
; R32-NEXT:  # %bb.3:
; R32-NEXT:    mv s0, s1
; R32-NEXT:  .LBB3_4:
; R32-NEXT:    mv a0, s3
; R32-NEXT:    mv a1, s0
; R32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 32
; R32-NEXT:    ret
;
; R64-LABEL: maxnum_f64_nnan:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -32
; R64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    mv s1, a1
; R64-NEXT:    mv s0, a0
; R64-NEXT:    call __gtdf2
; R64-NEXT:    bgtz a0, .LBB3_2
; R64-NEXT:  # %bb.1:
; R64-NEXT:    mv s0, s1
; R64-NEXT:  .LBB3_2:
; R64-NEXT:    mv a0, s0
; R64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 32
; R64-NEXT:    ret
  %r = call nnan double @llvm.maxnum.f64(double %x, double %y)
  ret double %r
}

define float @minnum_f32(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    call fminf
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: minnum_f32:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -16
; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    call fminf
; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 16
; R64-NEXT:    ret
  %r = call float @llvm.minnum.f32(float %x, float %y)
  ret float %r
}

define float @minnum_f32_nnan(float %x, float %y) nounwind {
; R32-LABEL: minnum_f32_nnan:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; R32-NEXT:    mv s1, a1
; R32-NEXT:    mv s0, a0
; R32-NEXT:    call __ltsf2
; R32-NEXT:    bltz a0, .LBB5_2
; R32-NEXT:  # %bb.1:
; R32-NEXT:    mv s0, s1
; R32-NEXT:  .LBB5_2:
; R32-NEXT:    mv a0, s0
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: minnum_f32_nnan:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -32
; R64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    mv s1, a1
; R64-NEXT:    mv s0, a0
; R64-NEXT:    call __ltsf2
; R64-NEXT:    bltz a0, .LBB5_2
; R64-NEXT:  # %bb.1:
; R64-NEXT:    mv s0, s1
; R64-NEXT:  .LBB5_2:
; R64-NEXT:    mv a0, s0
; R64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 32
; R64-NEXT:    ret
  %r = call nnan float @llvm.minnum.f32(float %x, float %y)
  ret float %r
}

define double @minnum_f64(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -16
; R32-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    call fmin
; R32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 16
; R32-NEXT:    ret
;
; R64-LABEL: minnum_f64:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -16
; R64-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    call fmin
; R64-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 16
; R64-NEXT:    ret
  %r = call double @llvm.minnum.f64(double %x, double %y)
  ret double %r
}

define double @minnum_f64_fast(double %x, double %y) nounwind {
; R32-LABEL: minnum_f64_fast:
; R32:       # %bb.0:
; R32-NEXT:    addi sp, sp, -32
; R32-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; R32-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; R32-NEXT:    mv s1, a3
; R32-NEXT:    mv s2, a2
; R32-NEXT:    mv s0, a1
; R32-NEXT:    mv s3, a0
; R32-NEXT:    call __ltdf2
; R32-NEXT:    mv a1, a0
; R32-NEXT:    mv a0, s3
; R32-NEXT:    bltz a1, .LBB7_2
; R32-NEXT:  # %bb.1:
; R32-NEXT:    mv s3, s2
; R32-NEXT:  .LBB7_2:
; R32-NEXT:    mv a1, s0
; R32-NEXT:    mv a2, s2
; R32-NEXT:    mv a3, s1
; R32-NEXT:    call __ltdf2
; R32-NEXT:    bltz a0, .LBB7_4
; R32-NEXT:  # %bb.3:
; R32-NEXT:    mv s0, s1
; R32-NEXT:  .LBB7_4:
; R32-NEXT:    mv a0, s3
; R32-NEXT:    mv a1, s0
; R32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; R32-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; R32-NEXT:    addi sp, sp, 32
; R32-NEXT:    ret
;
; R64-LABEL: minnum_f64_fast:
; R64:       # %bb.0:
; R64-NEXT:    addi sp, sp, -32
; R64-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; R64-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; R64-NEXT:    mv s1, a1
; R64-NEXT:    mv s0, a0
; R64-NEXT:    call __ltdf2
; R64-NEXT:    bltz a0, .LBB7_2
; R64-NEXT:  # %bb.1:
; R64-NEXT:    mv s0, s1
; R64-NEXT:  .LBB7_2:
; R64-NEXT:    mv a0, s0
; R64-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; R64-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; R64-NEXT:    addi sp, sp, 32
; R64-NEXT:    ret
  %r = call fast double @llvm.minnum.f64(double %x, double %y)
  ret double %r
}

declare float @llvm.maxnum.f32(float, float)
declare double @llvm.maxnum.f64(double, double)
declare float @llvm.minnum.f32(float, float)
declare double @llvm.minnum.f64(double, double)