; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -global-isel -mattr=+d < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv64 -global-isel -mattr=+d < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefix=CHECKIFD %s
; RUN: llc -mtriple=riscv32 -global-isel < %s \
; RUN:   | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv64 -global-isel < %s \
; RUN:   | FileCheck -check-prefix=RV64I %s

; Tests GlobalISel lowering of every fcmp predicate on double: with +d the
; compares lower to feq.d/flt.d/fle.d; without it they become libcalls
; (__eqdf2, __ltdf2, __unorddf2, ...) followed by an integer compare.

define i32 @fcmp_false(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_false:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    li a0, 0
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_false:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a0, 0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_false:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 0
; RV64I-NEXT:    ret
  %1 = fcmp false double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_oeq(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_oeq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_oeq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oeq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp oeq double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_ogt(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ogt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ogt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ogt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ogt double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_oge(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_oge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_oge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_oge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp oge double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_olt(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_olt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_olt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_olt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp olt double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
; FIXME: sgtz+xori can be slti a0, a0, 1
define i32 @fcmp_ole(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ole:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ole:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ole:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ole double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_one(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_one:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    flt.d a1, fa1, fa0
; CHECKIFD-NEXT:    or a0, a0, a1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_one:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    mv s2, a2
; RV32I-NEXT:    mv s3, a3
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    snez s4, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    mv a2, s2
; RV32I-NEXT:    mv a3, s3
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    and a0, s4, a0
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_one:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    snez s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    and a0, s2, a0
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = fcmp one double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_ord(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ord:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa1, fa1
; CHECKIFD-NEXT:    and a0, a0, a1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ord:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    seqz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ord:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    seqz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ord double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_ueq(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ueq:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    flt.d a1, fa1, fa0
; CHECKIFD-NEXT:    or a0, a0, a1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ueq:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -32
; RV32I-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s3, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    sw s4, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT:    mv s0, a0
; RV32I-NEXT:    mv s1, a1
; RV32I-NEXT:    mv s2, a2
; RV32I-NEXT:    mv s3, a3
; RV32I-NEXT:    call __eqdf2
; RV32I-NEXT:    seqz s4, a0
; RV32I-NEXT:    mv a0, s0
; RV32I-NEXT:    mv a1, s1
; RV32I-NEXT:    mv a2, s2
; RV32I-NEXT:    mv a3, s3
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    or a0, s4, a0
; RV32I-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s3, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    lw s4, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 32
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ueq:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -32
; RV64I-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    sd s2, 0(sp) # 8-byte Folded Spill
; RV64I-NEXT:    mv s0, a0
; RV64I-NEXT:    mv s1, a1
; RV64I-NEXT:    call __eqdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    seqz s2, a0
; RV64I-NEXT:    mv a0, s0
; RV64I-NEXT:    mv a1, s1
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    or a0, s2, a0
; RV64I-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    ld s2, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 32
; RV64I-NEXT:    ret
  %1 = fcmp ueq double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_ugt(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ugt:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ugt:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ledf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ugt:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ledf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ugt double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_uge(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_uge:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_uge:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __ltdf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uge:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __ltdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp uge double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_ult(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ult:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fle.d a0, fa1, fa0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ult:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gedf2
; RV32I-NEXT:    slti a0, a0, 0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ult:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gedf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    slti a0, a0, 0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ult double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
; FIXME: sgtz+xori can be slti a0, a0, 1
define i32 @fcmp_ule(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_ule:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    flt.d a0, fa1, fa0
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_ule:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __gtdf2
; RV32I-NEXT:    sgtz a0, a0
; RV32I-NEXT:    xori a0, a0, 1
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_ule:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __gtdf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sgtz a0, a0
; RV64I-NEXT:    xori a0, a0, 1
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp ule double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_une(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_une:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_une:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __nedf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_une:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __nedf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp une double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

; FIXME: sext.w on RV64 is unnecessary
define i32 @fcmp_uno(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_uno:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    feq.d a0, fa0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa1, fa1
; CHECKIFD-NEXT:    and a0, a0, a1
; CHECKIFD-NEXT:    xori a0, a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_uno:
; RV32I:       # %bb.0:
; RV32I-NEXT:    addi sp, sp, -16
; RV32I-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT:    call __unorddf2
; RV32I-NEXT:    snez a0, a0
; RV32I-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT:    addi sp, sp, 16
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_uno:
; RV64I:       # %bb.0:
; RV64I-NEXT:    addi sp, sp, -16
; RV64I-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT:    call __unorddf2
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    snez a0, a0
; RV64I-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT:    addi sp, sp, 16
; RV64I-NEXT:    ret
  %1 = fcmp uno double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}

define i32 @fcmp_true(double %a, double %b) nounwind {
; CHECKIFD-LABEL: fcmp_true:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    li a0, 1
; CHECKIFD-NEXT:    ret
;
; RV32I-LABEL: fcmp_true:
; RV32I:       # %bb.0:
; RV32I-NEXT:    li a0, 1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: fcmp_true:
; RV64I:       # %bb.0:
; RV64I-NEXT:    li a0, 1
; RV64I-NEXT:    ret
  %1 = fcmp true double %a, %b
  %2 = zext i1 %1 to i32
  ret i32 %2
}