; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32f | FileCheck -check-prefixes=RV32,RV32IF %s
; RUN: llc -mtriple=riscv64 -mattr=+f -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64f | FileCheck -check-prefixes=RV64,RV64IF %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=RV32,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=RV64,RV64IFD %s

; i32 saturate

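; The tests in this section exercise three clamp shapes: stest clamps a
; signed conversion result to [INT32_MIN, INT32_MAX], utest clamps an
; unsigned result to UINT32_MAX, and ustest clamps a signed result to
; [0, UINT32_MAX]. As a rough C sketch of the source a frontend might
; lower to the stest IR below (illustrative only, not part of the test):
;
;   #include <stdint.h>
;   int32_t stest_f64i32(double x) {
;     int64_t v = (int64_t)x;            /* fptosi double -> i64 */
;     if (v > INT32_MAX) v = INT32_MAX;  /* icmp slt + select    */
;     if (v < INT32_MIN) v = INT32_MIN;  /* icmp sgt + select    */
;     return (int32_t)v;                 /* trunc i64 -> i32     */
;   }
;
; With the matching hardware conversion available (e.g. +d for double),
; the whole clamp folds into a single fcvt with rtz plus a feq NaN
; check, since RISC-V FP-to-integer conversions already saturate.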
define i32 @stest_f64i32(double %x) {
; RV32IF-LABEL: stest_f64i32:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: beqz a1, .LBB0_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: slti a4, a1, 0
; RV32IF-NEXT: j .LBB0_3
; RV32IF-NEXT: .LBB0_2:
; RV32IF-NEXT: sltu a4, a0, a3
; RV32IF-NEXT: .LBB0_3: # %entry
; RV32IF-NEXT: neg a5, a4
; RV32IF-NEXT: and a1, a5, a1
; RV32IF-NEXT: bnez a4, .LBB0_5
; RV32IF-NEXT: # %bb.4: # %entry
; RV32IF-NEXT: mv a0, a3
; RV32IF-NEXT: .LBB0_5: # %entry
; RV32IF-NEXT: li a3, -1
; RV32IF-NEXT: beq a1, a3, .LBB0_7
; RV32IF-NEXT: # %bb.6: # %entry
; RV32IF-NEXT: slti a1, a1, 0
; RV32IF-NEXT: xori a1, a1, 1
; RV32IF-NEXT: beqz a1, .LBB0_8
; RV32IF-NEXT: j .LBB0_9
; RV32IF-NEXT: .LBB0_7:
; RV32IF-NEXT: sltu a1, a2, a0
; RV32IF-NEXT: bnez a1, .LBB0_9
; RV32IF-NEXT: .LBB0_8: # %entry
; RV32IF-NEXT: lui a0, 524288
; RV32IF-NEXT: .LBB0_9: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: addiw a2, a1, -1
; RV64IF-NEXT: blt a0, a2, .LBB0_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a2
; RV64IF-NEXT: .LBB0_2: # %entry
; RV64IF-NEXT: blt a1, a0, .LBB0_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 524288
; RV64IF-NEXT: .LBB0_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i32:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 524288
; RV64IFD-NEXT: addiw a2, a1, -1
; RV64IFD-NEXT: bge a0, a2, .LBB0_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: bge a1, a0, .LBB0_4
; RV64IFD-NEXT: .LBB0_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB0_3: # %entry
; RV64IFD-NEXT: mv a0, a2
; RV64IFD-NEXT: blt a1, a2, .LBB0_2
; RV64IFD-NEXT: .LBB0_4: # %entry
; RV64IFD-NEXT: lui a0, 524288
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f64i32(double %x) {
; RV32IF-LABEL: utest_f64i32:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunsdfdi
; RV32IF-NEXT: sltiu a2, a0, -1
; RV32IF-NEXT: seqz a1, a1
; RV32IF-NEXT: and a1, a1, a2
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: or a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: utest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: bltu a0, a1, .LBB1_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB1_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i32:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: bltu a0, a1, .LBB1_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB1_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f64i32(double %x) {
; RV32IF-LABEL: ustest_f64i32:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: beqz a1, .LBB2_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: slti a2, a1, 0
; RV32IF-NEXT: j .LBB2_3
; RV32IF-NEXT: .LBB2_2:
; RV32IF-NEXT: sltiu a2, a0, -1
; RV32IF-NEXT: .LBB2_3: # %entry
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: and a1, a2, a1
; RV32IF-NEXT: or a0, a3, a0
; RV32IF-NEXT: beqz a1, .LBB2_5
; RV32IF-NEXT: # %bb.4: # %entry
; RV32IF-NEXT: sgtz a1, a1
; RV32IF-NEXT: j .LBB2_6
; RV32IF-NEXT: .LBB2_5:
; RV32IF-NEXT: snez a1, a0
; RV32IF-NEXT: .LBB2_6: # %entry
; RV32IF-NEXT: neg a1, a1
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i32:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: blt a0, a1, .LBB2_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB2_2: # %entry
; RV64IF-NEXT: sgtz a1, a0
; RV64IF-NEXT: neg a1, a1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i32:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i32:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: blt a0, a1, .LBB2_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB2_2: # %entry
; RV64IFD-NEXT: sgtz a1, a0
; RV64IFD-NEXT: neg a1, a1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @stest_f32i32(float %x) {
; RV32-LABEL: stest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: bge a0, a2, .LBB3_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: bge a1, a0, .LBB3_4
; RV64-NEXT: .LBB3_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB3_3: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: blt a1, a2, .LBB3_2
; RV64-NEXT: .LBB3_4: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f32i32(float %x) {
; RV32-LABEL: utest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB4_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB4_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f32i32(float %x) {
; RV32-LABEL: ustest_f32i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: blt a0, a1, .LBB5_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB5_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @stest_f16i32(half %x) {
; RV32-LABEL: stest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixsfdi
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB6_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a1, 0
; RV32-NEXT: j .LBB6_3
; RV32-NEXT: .LBB6_2:
; RV32-NEXT: sltu a4, a0, a3
; RV32-NEXT: .LBB6_3: # %entry
; RV32-NEXT: neg a5, a4
; RV32-NEXT: and a1, a5, a1
; RV32-NEXT: bnez a4, .LBB6_5
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: mv a0, a3
; RV32-NEXT: .LBB6_5: # %entry
; RV32-NEXT: li a3, -1
; RV32-NEXT: beq a1, a3, .LBB6_7
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: xori a1, a1, 1
; RV32-NEXT: beqz a1, .LBB6_8
; RV32-NEXT: j .LBB6_9
; RV32-NEXT: .LBB6_7:
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: bnez a1, .LBB6_9
; RV32-NEXT: .LBB6_8: # %entry
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: .LBB6_9: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: blt a0, a2, .LBB6_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: .LBB6_2: # %entry
; RV64-NEXT: blt a1, a0, .LBB6_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: .LBB6_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 2147483647
  %spec.store.select = select i1 %0, i64 %conv, i64 2147483647
  %1 = icmp sgt i64 %spec.store.select, -2147483648
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 -2147483648
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utesth_f16i32(half %x) {
; RV32-LABEL: utesth_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixunssfdi
; RV32-NEXT: sltiu a2, a0, -1
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB7_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB7_2: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i64
  %0 = icmp ult i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f16i32(half %x) {
; RV32-LABEL: ustest_f16i32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixsfdi
; RV32-NEXT: beqz a1, .LBB8_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
; RV32-NEXT: j .LBB8_3
; RV32-NEXT: .LBB8_2:
; RV32-NEXT: sltiu a2, a0, -1
; RV32-NEXT: .LBB8_3: # %entry
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: neg a2, a2
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: or a0, a3, a0
; RV32-NEXT: beqz a1, .LBB8_5
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: sgtz a1, a1
; RV32-NEXT: j .LBB8_6
; RV32-NEXT: .LBB8_5:
; RV32-NEXT: snez a1, a0
; RV32-NEXT: .LBB8_6: # %entry
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: blt a0, a1, .LBB8_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB8_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %0 = icmp slt i64 %conv, 4294967295
  %spec.store.select = select i1 %0, i64 %conv, i64 4294967295
  %1 = icmp sgt i64 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i64 %spec.store.select, i64 0
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

; i16 saturate

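; The i16 tests repeat the same three clamp shapes through an i32
; intermediate; a rough C sketch for ustest_f64i16 (illustrative only,
; not part of the test):
;
;   #include <stdint.h>
;   int16_t ustest_f64i16(double x) {
;     int32_t v = (int32_t)x;    /* fptosi double -> i32 */
;     if (v > 65535) v = 65535;  /* icmp slt + select    */
;     if (v < 0) v = 0;          /* icmp sgt + select    */
;     return (int16_t)v;         /* trunc i32 -> i16     */
;   }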
define i16 @stest_f64i16(double %x) {
; RV32IF-LABEL: stest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 8
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB9_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB9_2: # %entry
; RV32IF-NEXT: lui a1, 1048568
; RV32IF-NEXT: blt a1, a0, .LBB9_4
; RV32IF-NEXT: # %bb.3: # %entry
; RV32IF-NEXT: lui a0, 1048568
; RV32IF-NEXT: .LBB9_4: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 8
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB9_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB9_2: # %entry
; RV64IF-NEXT: lui a1, 1048568
; RV64IF-NEXT: blt a1, a0, .LBB9_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 1048568
; RV64IF-NEXT: .LBB9_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 8
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bge a0, a1, .LBB9_3
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: bge a1, a0, .LBB9_4
; RV32IFD-NEXT: .LBB9_2: # %entry
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB9_3: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: blt a1, a0, .LBB9_2
; RV32IFD-NEXT: .LBB9_4: # %entry
; RV32IFD-NEXT: lui a0, 1048568
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 8
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bge a0, a1, .LBB9_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: bge a1, a0, .LBB9_4
; RV64IFD-NEXT: .LBB9_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB9_3: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: blt a1, a0, .LBB9_2
; RV64IFD-NEXT: .LBB9_4: # %entry
; RV64IFD-NEXT: lui a0, 1048568
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f64i16(double %x) {
; RV32IF-LABEL: utest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunsdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: bltu a0, a1, .LBB10_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB10_2: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: utest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: bltu a0, a1, .LBB10_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB10_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bltu a0, a1, .LBB10_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: .LBB10_2: # %entry
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bltu a0, a1, .LBB10_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB10_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f64i16(double %x) {
; RV32IF-LABEL: ustest_f64i16:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB11_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB11_2: # %entry
; RV32IF-NEXT: sgtz a1, a0
; RV32IF-NEXT: neg a1, a1
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i16:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB11_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB11_2: # %entry
; RV64IF-NEXT: sgtz a1, a0
; RV64IF-NEXT: neg a1, a1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i16:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: blt a0, a1, .LBB11_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: .LBB11_2: # %entry
; RV32IFD-NEXT: sgtz a1, a0
; RV32IFD-NEXT: neg a1, a1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i16:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: blt a0, a1, .LBB11_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB11_2: # %entry
; RV64IFD-NEXT: sgtz a1, a0
; RV64IFD-NEXT: neg a1, a1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f32i16(float %x) {
; RV32-LABEL: stest_f32i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bge a0, a1, .LBB12_3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: bge a1, a0, .LBB12_4
; RV32-NEXT: .LBB12_2: # %entry
; RV32-NEXT: ret
; RV32-NEXT: .LBB12_3: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: blt a1, a0, .LBB12_2
; RV32-NEXT: .LBB12_4: # %entry
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.w.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bge a0, a1, .LBB12_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: bge a1, a0, .LBB12_4
; RV64-NEXT: .LBB12_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB12_3: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: blt a1, a0, .LBB12_2
; RV64-NEXT: .LBB12_4: # %entry
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f32i16(float %x) {
; RV32-LABEL: utest_f32i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bltu a0, a1, .LBB13_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB13_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bltu a0, a1, .LBB13_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB13_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f32i16(float %x) {
; RV32-LABEL: ustest_f32i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB14_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB14_2: # %entry
; RV32-NEXT: sgtz a1, a0
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.w.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB14_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB14_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f16i16(half %x) {
; RV32-LABEL: stest_f16i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB15_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB15_2: # %entry
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: blt a1, a0, .LBB15_4
; RV32-NEXT: # %bb.3: # %entry
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: .LBB15_4: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB15_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB15_2: # %entry
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: blt a1, a0, .LBB15_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: .LBB15_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i32
  %0 = icmp slt i32 %conv, 32767
  %spec.store.select = select i1 %0, i32 %conv, i32 32767
  %1 = icmp sgt i32 %spec.store.select, -32768
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 -32768
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utesth_f16i16(half %x) {
; RV32-LABEL: utesth_f16i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bltu a0, a1, .LBB16_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB16_2: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bltu a0, a1, .LBB16_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB16_2: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i32
  %0 = icmp ult i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f16i16(half %x) {
; RV32-LABEL: ustest_f16i16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB17_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB17_2: # %entry
; RV32-NEXT: sgtz a1, a0
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB17_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB17_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i32
  %0 = icmp slt i32 %conv, 65535
  %spec.store.select = select i1 %0, i32 %conv, i32 65535
  %1 = icmp sgt i32 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i32 %spec.store.select, i32 0
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

; i64 saturate

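; The i64 tests widen through i128, so the conversion itself generally
; becomes a compiler-rt libcall (__fixdfti, __fixunssfti, ...), except
; where the clamp matches the saturating hardware conversion (e.g.
; stest_f64i64 on RV64IFD folds to a single fcvt.l.d). A rough C sketch
; for stest_f64i64 (illustrative only, using __int128):
;
;   #include <stdint.h>
;   int64_t stest_f64i64(double x) {
;     __int128 v = (__int128)x;          /* __fixdfti           */
;     if (v > INT64_MAX) v = INT64_MAX;  /* icmp slt + select   */
;     if (v < INT64_MIN) v = INT64_MIN;  /* icmp sgt + select   */
;     return (int64_t)v;                 /* trunc i128 -> i64   */
;   }
;
; Note that the utest/ustest variants below compare against
; 18446744073709551616 (2^64, one past UINT64_MAX), so the out-of-range
; select value truncates to 0 rather than saturating to UINT64_MAX.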
RV64IF-NEXT: beqz a2, .LBB18_8 1176; RV64IF-NEXT: j .LBB18_9 1177; RV64IF-NEXT: .LBB18_7: 1178; RV64IF-NEXT: sltu a2, a1, a0 1179; RV64IF-NEXT: bnez a2, .LBB18_9 1180; RV64IF-NEXT: .LBB18_8: # %entry 1181; RV64IF-NEXT: mv a0, a1 1182; RV64IF-NEXT: .LBB18_9: # %entry 1183; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1184; RV64IF-NEXT: .cfi_restore ra 1185; RV64IF-NEXT: addi sp, sp, 16 1186; RV64IF-NEXT: .cfi_def_cfa_offset 0 1187; RV64IF-NEXT: ret 1188; 1189; RV32IFD-LABEL: stest_f64i64: 1190; RV32IFD: # %bb.0: # %entry 1191; RV32IFD-NEXT: addi sp, sp, -32 1192; RV32IFD-NEXT: .cfi_def_cfa_offset 32 1193; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1194; RV32IFD-NEXT: .cfi_offset ra, -4 1195; RV32IFD-NEXT: addi a0, sp, 8 1196; RV32IFD-NEXT: call __fixdfti 1197; RV32IFD-NEXT: lw a3, 8(sp) 1198; RV32IFD-NEXT: lw a1, 12(sp) 1199; RV32IFD-NEXT: lw a2, 16(sp) 1200; RV32IFD-NEXT: lw a4, 20(sp) 1201; RV32IFD-NEXT: lui a0, 524288 1202; RV32IFD-NEXT: addi a5, a0, -1 1203; RV32IFD-NEXT: beq a1, a5, .LBB18_2 1204; RV32IFD-NEXT: # %bb.1: # %entry 1205; RV32IFD-NEXT: sltu a6, a1, a5 1206; RV32IFD-NEXT: or a7, a2, a4 1207; RV32IFD-NEXT: bnez a7, .LBB18_3 1208; RV32IFD-NEXT: j .LBB18_4 1209; RV32IFD-NEXT: .LBB18_2: 1210; RV32IFD-NEXT: sltiu a6, a3, -1 1211; RV32IFD-NEXT: or a7, a2, a4 1212; RV32IFD-NEXT: beqz a7, .LBB18_4 1213; RV32IFD-NEXT: .LBB18_3: # %entry 1214; RV32IFD-NEXT: slti a6, a4, 0 1215; RV32IFD-NEXT: .LBB18_4: # %entry 1216; RV32IFD-NEXT: addi a7, a6, -1 1217; RV32IFD-NEXT: neg t0, a6 1218; RV32IFD-NEXT: bnez a6, .LBB18_6 1219; RV32IFD-NEXT: # %bb.5: # %entry 1220; RV32IFD-NEXT: mv a1, a5 1221; RV32IFD-NEXT: .LBB18_6: # %entry 1222; RV32IFD-NEXT: or a3, a7, a3 1223; RV32IFD-NEXT: and a4, t0, a4 1224; RV32IFD-NEXT: and a2, t0, a2 1225; RV32IFD-NEXT: beq a1, a0, .LBB18_8 1226; RV32IFD-NEXT: # %bb.7: # %entry 1227; RV32IFD-NEXT: sltu a0, a0, a1 1228; RV32IFD-NEXT: j .LBB18_9 1229; RV32IFD-NEXT: .LBB18_8: 1230; RV32IFD-NEXT: snez a0, a3 1231; RV32IFD-NEXT: .LBB18_9: # %entry 1232; RV32IFD-NEXT: and a2, a2, a4 1233; RV32IFD-NEXT: li a5, -1 1234; RV32IFD-NEXT: beq a2, a5, .LBB18_11 1235; RV32IFD-NEXT: # %bb.10: # %entry 1236; RV32IFD-NEXT: slti a0, a4, 0 1237; RV32IFD-NEXT: xori a0, a0, 1 1238; RV32IFD-NEXT: .LBB18_11: # %entry 1239; RV32IFD-NEXT: bnez a0, .LBB18_13 1240; RV32IFD-NEXT: # %bb.12: # %entry 1241; RV32IFD-NEXT: lui a1, 524288 1242; RV32IFD-NEXT: .LBB18_13: # %entry 1243; RV32IFD-NEXT: neg a0, a0 1244; RV32IFD-NEXT: and a0, a0, a3 1245; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1246; RV32IFD-NEXT: .cfi_restore ra 1247; RV32IFD-NEXT: addi sp, sp, 32 1248; RV32IFD-NEXT: .cfi_def_cfa_offset 0 1249; RV32IFD-NEXT: ret 1250; 1251; RV64IFD-LABEL: stest_f64i64: 1252; RV64IFD: # %bb.0: # %entry 1253; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz 1254; RV64IFD-NEXT: feq.d a1, fa0, fa0 1255; RV64IFD-NEXT: seqz a1, a1 1256; RV64IFD-NEXT: addi a1, a1, -1 1257; RV64IFD-NEXT: and a0, a1, a0 1258; RV64IFD-NEXT: ret 1259entry: 1260 %conv = fptosi double %x to i128 1261 %0 = icmp slt i128 %conv, 9223372036854775807 1262 %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 1263 %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 1264 %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 1265 %conv6 = trunc i128 %spec.store.select7 to i64 1266 ret i64 %conv6 1267} 1268 1269define i64 @utest_f64i64(double %x) { 1270; RV32IF-LABEL: utest_f64i64: 1271; RV32IF: # %bb.0: # %entry 1272; RV32IF-NEXT: addi sp, sp, -32 1273; RV32IF-NEXT: 
.cfi_def_cfa_offset 32 1274; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1275; RV32IF-NEXT: .cfi_offset ra, -4 1276; RV32IF-NEXT: mv a2, a1 1277; RV32IF-NEXT: mv a1, a0 1278; RV32IF-NEXT: addi a0, sp, 8 1279; RV32IF-NEXT: call __fixunsdfti 1280; RV32IF-NEXT: lw a0, 16(sp) 1281; RV32IF-NEXT: lw a1, 20(sp) 1282; RV32IF-NEXT: lw a2, 12(sp) 1283; RV32IF-NEXT: lw a3, 8(sp) 1284; RV32IF-NEXT: or a4, a1, a0 1285; RV32IF-NEXT: xori a0, a0, 1 1286; RV32IF-NEXT: seqz a4, a4 1287; RV32IF-NEXT: or a0, a0, a1 1288; RV32IF-NEXT: seqz a0, a0 1289; RV32IF-NEXT: addi a0, a0, -1 1290; RV32IF-NEXT: and a0, a0, a4 1291; RV32IF-NEXT: neg a1, a0 1292; RV32IF-NEXT: and a0, a1, a3 1293; RV32IF-NEXT: and a1, a1, a2 1294; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1295; RV32IF-NEXT: .cfi_restore ra 1296; RV32IF-NEXT: addi sp, sp, 32 1297; RV32IF-NEXT: .cfi_def_cfa_offset 0 1298; RV32IF-NEXT: ret 1299; 1300; RV64-LABEL: utest_f64i64: 1301; RV64: # %bb.0: # %entry 1302; RV64-NEXT: addi sp, sp, -16 1303; RV64-NEXT: .cfi_def_cfa_offset 16 1304; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1305; RV64-NEXT: .cfi_offset ra, -8 1306; RV64-NEXT: call __fixunsdfti 1307; RV64-NEXT: snez a1, a1 1308; RV64-NEXT: addi a1, a1, -1 1309; RV64-NEXT: and a0, a1, a0 1310; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1311; RV64-NEXT: .cfi_restore ra 1312; RV64-NEXT: addi sp, sp, 16 1313; RV64-NEXT: .cfi_def_cfa_offset 0 1314; RV64-NEXT: ret 1315; 1316; RV32IFD-LABEL: utest_f64i64: 1317; RV32IFD: # %bb.0: # %entry 1318; RV32IFD-NEXT: addi sp, sp, -32 1319; RV32IFD-NEXT: .cfi_def_cfa_offset 32 1320; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1321; RV32IFD-NEXT: .cfi_offset ra, -4 1322; RV32IFD-NEXT: addi a0, sp, 8 1323; RV32IFD-NEXT: call __fixunsdfti 1324; RV32IFD-NEXT: lw a0, 16(sp) 1325; RV32IFD-NEXT: lw a1, 20(sp) 1326; RV32IFD-NEXT: lw a2, 12(sp) 1327; RV32IFD-NEXT: lw a3, 8(sp) 1328; RV32IFD-NEXT: or a4, a1, a0 1329; RV32IFD-NEXT: xori a0, a0, 1 1330; RV32IFD-NEXT: seqz a4, a4 1331; RV32IFD-NEXT: or a0, a0, a1 1332; RV32IFD-NEXT: seqz a0, a0 1333; RV32IFD-NEXT: addi a0, a0, -1 1334; RV32IFD-NEXT: and a0, a0, a4 1335; RV32IFD-NEXT: neg a1, a0 1336; RV32IFD-NEXT: and a0, a1, a3 1337; RV32IFD-NEXT: and a1, a1, a2 1338; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1339; RV32IFD-NEXT: .cfi_restore ra 1340; RV32IFD-NEXT: addi sp, sp, 32 1341; RV32IFD-NEXT: .cfi_def_cfa_offset 0 1342; RV32IFD-NEXT: ret 1343entry: 1344 %conv = fptoui double %x to i128 1345 %0 = icmp ult i128 %conv, 18446744073709551616 1346 %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 1347 %conv6 = trunc i128 %spec.store.select to i64 1348 ret i64 %conv6 1349} 1350 1351define i64 @ustest_f64i64(double %x) { 1352; RV32IF-LABEL: ustest_f64i64: 1353; RV32IF: # %bb.0: # %entry 1354; RV32IF-NEXT: addi sp, sp, -32 1355; RV32IF-NEXT: .cfi_def_cfa_offset 32 1356; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1357; RV32IF-NEXT: .cfi_offset ra, -4 1358; RV32IF-NEXT: mv a2, a1 1359; RV32IF-NEXT: mv a1, a0 1360; RV32IF-NEXT: addi a0, sp, 8 1361; RV32IF-NEXT: call __fixdfti 1362; RV32IF-NEXT: lw a1, 20(sp) 1363; RV32IF-NEXT: lw a0, 16(sp) 1364; RV32IF-NEXT: beqz a1, .LBB20_2 1365; RV32IF-NEXT: # %bb.1: # %entry 1366; RV32IF-NEXT: slti a2, a1, 0 1367; RV32IF-NEXT: j .LBB20_3 1368; RV32IF-NEXT: .LBB20_2: 1369; RV32IF-NEXT: seqz a2, a0 1370; RV32IF-NEXT: .LBB20_3: # %entry 1371; RV32IF-NEXT: xori a3, a0, 1 1372; RV32IF-NEXT: or a3, a3, a1 1373; RV32IF-NEXT: seqz a3, a3 1374; RV32IF-NEXT: addi a3, a3, -1 1375; RV32IF-NEXT: and a3, 
a3, a2 1376; RV32IF-NEXT: neg a2, a3 1377; RV32IF-NEXT: bnez a3, .LBB20_5 1378; RV32IF-NEXT: # %bb.4: # %entry 1379; RV32IF-NEXT: li a0, 1 1380; RV32IF-NEXT: .LBB20_5: # %entry 1381; RV32IF-NEXT: lw a3, 8(sp) 1382; RV32IF-NEXT: lw a4, 12(sp) 1383; RV32IF-NEXT: and a5, a2, a1 1384; RV32IF-NEXT: beqz a5, .LBB20_7 1385; RV32IF-NEXT: # %bb.6: # %entry 1386; RV32IF-NEXT: sgtz a1, a5 1387; RV32IF-NEXT: j .LBB20_8 1388; RV32IF-NEXT: .LBB20_7: 1389; RV32IF-NEXT: snez a1, a0 1390; RV32IF-NEXT: .LBB20_8: # %entry 1391; RV32IF-NEXT: and a4, a2, a4 1392; RV32IF-NEXT: or a0, a0, a5 1393; RV32IF-NEXT: and a2, a2, a3 1394; RV32IF-NEXT: bnez a0, .LBB20_10 1395; RV32IF-NEXT: # %bb.9: 1396; RV32IF-NEXT: or a0, a2, a4 1397; RV32IF-NEXT: snez a1, a0 1398; RV32IF-NEXT: .LBB20_10: # %entry 1399; RV32IF-NEXT: neg a1, a1 1400; RV32IF-NEXT: and a0, a1, a2 1401; RV32IF-NEXT: and a1, a1, a4 1402; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1403; RV32IF-NEXT: .cfi_restore ra 1404; RV32IF-NEXT: addi sp, sp, 32 1405; RV32IF-NEXT: .cfi_def_cfa_offset 0 1406; RV32IF-NEXT: ret 1407; 1408; RV64-LABEL: ustest_f64i64: 1409; RV64: # %bb.0: # %entry 1410; RV64-NEXT: addi sp, sp, -16 1411; RV64-NEXT: .cfi_def_cfa_offset 16 1412; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1413; RV64-NEXT: .cfi_offset ra, -8 1414; RV64-NEXT: call __fixdfti 1415; RV64-NEXT: slti a2, a1, 1 1416; RV64-NEXT: blez a1, .LBB20_2 1417; RV64-NEXT: # %bb.1: # %entry 1418; RV64-NEXT: li a1, 1 1419; RV64-NEXT: .LBB20_2: # %entry 1420; RV64-NEXT: neg a2, a2 1421; RV64-NEXT: and a0, a2, a0 1422; RV64-NEXT: beqz a1, .LBB20_4 1423; RV64-NEXT: # %bb.3: # %entry 1424; RV64-NEXT: sgtz a1, a1 1425; RV64-NEXT: j .LBB20_5 1426; RV64-NEXT: .LBB20_4: 1427; RV64-NEXT: snez a1, a0 1428; RV64-NEXT: .LBB20_5: # %entry 1429; RV64-NEXT: neg a1, a1 1430; RV64-NEXT: and a0, a1, a0 1431; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1432; RV64-NEXT: .cfi_restore ra 1433; RV64-NEXT: addi sp, sp, 16 1434; RV64-NEXT: .cfi_def_cfa_offset 0 1435; RV64-NEXT: ret 1436; 1437; RV32IFD-LABEL: ustest_f64i64: 1438; RV32IFD: # %bb.0: # %entry 1439; RV32IFD-NEXT: addi sp, sp, -32 1440; RV32IFD-NEXT: .cfi_def_cfa_offset 32 1441; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1442; RV32IFD-NEXT: .cfi_offset ra, -4 1443; RV32IFD-NEXT: addi a0, sp, 8 1444; RV32IFD-NEXT: call __fixdfti 1445; RV32IFD-NEXT: lw a1, 20(sp) 1446; RV32IFD-NEXT: lw a0, 16(sp) 1447; RV32IFD-NEXT: beqz a1, .LBB20_2 1448; RV32IFD-NEXT: # %bb.1: # %entry 1449; RV32IFD-NEXT: slti a2, a1, 0 1450; RV32IFD-NEXT: j .LBB20_3 1451; RV32IFD-NEXT: .LBB20_2: 1452; RV32IFD-NEXT: seqz a2, a0 1453; RV32IFD-NEXT: .LBB20_3: # %entry 1454; RV32IFD-NEXT: xori a3, a0, 1 1455; RV32IFD-NEXT: or a3, a3, a1 1456; RV32IFD-NEXT: seqz a3, a3 1457; RV32IFD-NEXT: addi a3, a3, -1 1458; RV32IFD-NEXT: and a3, a3, a2 1459; RV32IFD-NEXT: neg a2, a3 1460; RV32IFD-NEXT: bnez a3, .LBB20_5 1461; RV32IFD-NEXT: # %bb.4: # %entry 1462; RV32IFD-NEXT: li a0, 1 1463; RV32IFD-NEXT: .LBB20_5: # %entry 1464; RV32IFD-NEXT: lw a3, 8(sp) 1465; RV32IFD-NEXT: lw a4, 12(sp) 1466; RV32IFD-NEXT: and a5, a2, a1 1467; RV32IFD-NEXT: beqz a5, .LBB20_7 1468; RV32IFD-NEXT: # %bb.6: # %entry 1469; RV32IFD-NEXT: sgtz a1, a5 1470; RV32IFD-NEXT: j .LBB20_8 1471; RV32IFD-NEXT: .LBB20_7: 1472; RV32IFD-NEXT: snez a1, a0 1473; RV32IFD-NEXT: .LBB20_8: # %entry 1474; RV32IFD-NEXT: and a4, a2, a4 1475; RV32IFD-NEXT: or a0, a0, a5 1476; RV32IFD-NEXT: and a2, a2, a3 1477; RV32IFD-NEXT: bnez a0, .LBB20_10 1478; RV32IFD-NEXT: # %bb.9: 1479; RV32IFD-NEXT: or a0, a2, a4 1480; 
RV32IFD-NEXT: snez a1, a0 1481; RV32IFD-NEXT: .LBB20_10: # %entry 1482; RV32IFD-NEXT: neg a1, a1 1483; RV32IFD-NEXT: and a0, a1, a2 1484; RV32IFD-NEXT: and a1, a1, a4 1485; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1486; RV32IFD-NEXT: .cfi_restore ra 1487; RV32IFD-NEXT: addi sp, sp, 32 1488; RV32IFD-NEXT: .cfi_def_cfa_offset 0 1489; RV32IFD-NEXT: ret 1490entry: 1491 %conv = fptosi double %x to i128 1492 %0 = icmp slt i128 %conv, 18446744073709551616 1493 %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 1494 %1 = icmp sgt i128 %spec.store.select, 0 1495 %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 1496 %conv6 = trunc i128 %spec.store.select7 to i64 1497 ret i64 %conv6 1498} 1499 1500define i64 @stest_f32i64(float %x) { 1501; RV32-LABEL: stest_f32i64: 1502; RV32: # %bb.0: # %entry 1503; RV32-NEXT: addi sp, sp, -32 1504; RV32-NEXT: .cfi_def_cfa_offset 32 1505; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1506; RV32-NEXT: .cfi_offset ra, -4 1507; RV32-NEXT: addi a0, sp, 8 1508; RV32-NEXT: call __fixsfti 1509; RV32-NEXT: lw a3, 8(sp) 1510; RV32-NEXT: lw a1, 12(sp) 1511; RV32-NEXT: lw a2, 16(sp) 1512; RV32-NEXT: lw a4, 20(sp) 1513; RV32-NEXT: lui a0, 524288 1514; RV32-NEXT: addi a5, a0, -1 1515; RV32-NEXT: beq a1, a5, .LBB21_2 1516; RV32-NEXT: # %bb.1: # %entry 1517; RV32-NEXT: sltu a6, a1, a5 1518; RV32-NEXT: or a7, a2, a4 1519; RV32-NEXT: bnez a7, .LBB21_3 1520; RV32-NEXT: j .LBB21_4 1521; RV32-NEXT: .LBB21_2: 1522; RV32-NEXT: sltiu a6, a3, -1 1523; RV32-NEXT: or a7, a2, a4 1524; RV32-NEXT: beqz a7, .LBB21_4 1525; RV32-NEXT: .LBB21_3: # %entry 1526; RV32-NEXT: slti a6, a4, 0 1527; RV32-NEXT: .LBB21_4: # %entry 1528; RV32-NEXT: addi a7, a6, -1 1529; RV32-NEXT: neg t0, a6 1530; RV32-NEXT: bnez a6, .LBB21_6 1531; RV32-NEXT: # %bb.5: # %entry 1532; RV32-NEXT: mv a1, a5 1533; RV32-NEXT: .LBB21_6: # %entry 1534; RV32-NEXT: or a3, a7, a3 1535; RV32-NEXT: and a4, t0, a4 1536; RV32-NEXT: and a2, t0, a2 1537; RV32-NEXT: beq a1, a0, .LBB21_8 1538; RV32-NEXT: # %bb.7: # %entry 1539; RV32-NEXT: sltu a0, a0, a1 1540; RV32-NEXT: j .LBB21_9 1541; RV32-NEXT: .LBB21_8: 1542; RV32-NEXT: snez a0, a3 1543; RV32-NEXT: .LBB21_9: # %entry 1544; RV32-NEXT: and a2, a2, a4 1545; RV32-NEXT: li a5, -1 1546; RV32-NEXT: beq a2, a5, .LBB21_11 1547; RV32-NEXT: # %bb.10: # %entry 1548; RV32-NEXT: slti a0, a4, 0 1549; RV32-NEXT: xori a0, a0, 1 1550; RV32-NEXT: .LBB21_11: # %entry 1551; RV32-NEXT: bnez a0, .LBB21_13 1552; RV32-NEXT: # %bb.12: # %entry 1553; RV32-NEXT: lui a1, 524288 1554; RV32-NEXT: .LBB21_13: # %entry 1555; RV32-NEXT: neg a0, a0 1556; RV32-NEXT: and a0, a0, a3 1557; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1558; RV32-NEXT: .cfi_restore ra 1559; RV32-NEXT: addi sp, sp, 32 1560; RV32-NEXT: .cfi_def_cfa_offset 0 1561; RV32-NEXT: ret 1562; 1563; RV64-LABEL: stest_f32i64: 1564; RV64: # %bb.0: # %entry 1565; RV64-NEXT: fcvt.l.s a0, fa0, rtz 1566; RV64-NEXT: feq.s a1, fa0, fa0 1567; RV64-NEXT: seqz a1, a1 1568; RV64-NEXT: addi a1, a1, -1 1569; RV64-NEXT: and a0, a1, a0 1570; RV64-NEXT: ret 1571entry: 1572 %conv = fptosi float %x to i128 1573 %0 = icmp slt i128 %conv, 9223372036854775807 1574 %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807 1575 %1 = icmp sgt i128 %spec.store.select, -9223372036854775808 1576 %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808 1577 %conv6 = trunc i128 %spec.store.select7 to i64 1578 ret i64 %conv6 1579} 1580 1581define i64 @utest_f32i64(float %x) { 1582; 
RV32-LABEL: utest_f32i64: 1583; RV32: # %bb.0: # %entry 1584; RV32-NEXT: addi sp, sp, -32 1585; RV32-NEXT: .cfi_def_cfa_offset 32 1586; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1587; RV32-NEXT: .cfi_offset ra, -4 1588; RV32-NEXT: addi a0, sp, 8 1589; RV32-NEXT: call __fixunssfti 1590; RV32-NEXT: lw a0, 16(sp) 1591; RV32-NEXT: lw a1, 20(sp) 1592; RV32-NEXT: lw a2, 12(sp) 1593; RV32-NEXT: lw a3, 8(sp) 1594; RV32-NEXT: or a4, a1, a0 1595; RV32-NEXT: xori a0, a0, 1 1596; RV32-NEXT: seqz a4, a4 1597; RV32-NEXT: or a0, a0, a1 1598; RV32-NEXT: seqz a0, a0 1599; RV32-NEXT: addi a0, a0, -1 1600; RV32-NEXT: and a0, a0, a4 1601; RV32-NEXT: neg a1, a0 1602; RV32-NEXT: and a0, a1, a3 1603; RV32-NEXT: and a1, a1, a2 1604; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1605; RV32-NEXT: .cfi_restore ra 1606; RV32-NEXT: addi sp, sp, 32 1607; RV32-NEXT: .cfi_def_cfa_offset 0 1608; RV32-NEXT: ret 1609; 1610; RV64-LABEL: utest_f32i64: 1611; RV64: # %bb.0: # %entry 1612; RV64-NEXT: addi sp, sp, -16 1613; RV64-NEXT: .cfi_def_cfa_offset 16 1614; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1615; RV64-NEXT: .cfi_offset ra, -8 1616; RV64-NEXT: call __fixunssfti 1617; RV64-NEXT: snez a1, a1 1618; RV64-NEXT: addi a1, a1, -1 1619; RV64-NEXT: and a0, a1, a0 1620; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1621; RV64-NEXT: .cfi_restore ra 1622; RV64-NEXT: addi sp, sp, 16 1623; RV64-NEXT: .cfi_def_cfa_offset 0 1624; RV64-NEXT: ret 1625entry: 1626 %conv = fptoui float %x to i128 1627 %0 = icmp ult i128 %conv, 18446744073709551616 1628 %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 1629 %conv6 = trunc i128 %spec.store.select to i64 1630 ret i64 %conv6 1631} 1632 1633define i64 @ustest_f32i64(float %x) { 1634; RV32-LABEL: ustest_f32i64: 1635; RV32: # %bb.0: # %entry 1636; RV32-NEXT: addi sp, sp, -32 1637; RV32-NEXT: .cfi_def_cfa_offset 32 1638; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1639; RV32-NEXT: .cfi_offset ra, -4 1640; RV32-NEXT: addi a0, sp, 8 1641; RV32-NEXT: call __fixsfti 1642; RV32-NEXT: lw a1, 20(sp) 1643; RV32-NEXT: lw a0, 16(sp) 1644; RV32-NEXT: beqz a1, .LBB23_2 1645; RV32-NEXT: # %bb.1: # %entry 1646; RV32-NEXT: slti a2, a1, 0 1647; RV32-NEXT: j .LBB23_3 1648; RV32-NEXT: .LBB23_2: 1649; RV32-NEXT: seqz a2, a0 1650; RV32-NEXT: .LBB23_3: # %entry 1651; RV32-NEXT: xori a3, a0, 1 1652; RV32-NEXT: or a3, a3, a1 1653; RV32-NEXT: seqz a3, a3 1654; RV32-NEXT: addi a3, a3, -1 1655; RV32-NEXT: and a3, a3, a2 1656; RV32-NEXT: neg a2, a3 1657; RV32-NEXT: bnez a3, .LBB23_5 1658; RV32-NEXT: # %bb.4: # %entry 1659; RV32-NEXT: li a0, 1 1660; RV32-NEXT: .LBB23_5: # %entry 1661; RV32-NEXT: lw a3, 8(sp) 1662; RV32-NEXT: lw a4, 12(sp) 1663; RV32-NEXT: and a5, a2, a1 1664; RV32-NEXT: beqz a5, .LBB23_7 1665; RV32-NEXT: # %bb.6: # %entry 1666; RV32-NEXT: sgtz a1, a5 1667; RV32-NEXT: j .LBB23_8 1668; RV32-NEXT: .LBB23_7: 1669; RV32-NEXT: snez a1, a0 1670; RV32-NEXT: .LBB23_8: # %entry 1671; RV32-NEXT: and a4, a2, a4 1672; RV32-NEXT: or a0, a0, a5 1673; RV32-NEXT: and a2, a2, a3 1674; RV32-NEXT: bnez a0, .LBB23_10 1675; RV32-NEXT: # %bb.9: 1676; RV32-NEXT: or a0, a2, a4 1677; RV32-NEXT: snez a1, a0 1678; RV32-NEXT: .LBB23_10: # %entry 1679; RV32-NEXT: neg a1, a1 1680; RV32-NEXT: and a0, a1, a2 1681; RV32-NEXT: and a1, a1, a4 1682; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1683; RV32-NEXT: .cfi_restore ra 1684; RV32-NEXT: addi sp, sp, 32 1685; RV32-NEXT: .cfi_def_cfa_offset 0 1686; RV32-NEXT: ret 1687; 1688; RV64-LABEL: ustest_f32i64: 1689; RV64: # %bb.0: # %entry 1690; RV64-NEXT: 
addi sp, sp, -16 1691; RV64-NEXT: .cfi_def_cfa_offset 16 1692; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1693; RV64-NEXT: .cfi_offset ra, -8 1694; RV64-NEXT: call __fixsfti 1695; RV64-NEXT: slti a2, a1, 1 1696; RV64-NEXT: blez a1, .LBB23_2 1697; RV64-NEXT: # %bb.1: # %entry 1698; RV64-NEXT: li a1, 1 1699; RV64-NEXT: .LBB23_2: # %entry 1700; RV64-NEXT: neg a2, a2 1701; RV64-NEXT: and a0, a2, a0 1702; RV64-NEXT: beqz a1, .LBB23_4 1703; RV64-NEXT: # %bb.3: # %entry 1704; RV64-NEXT: sgtz a1, a1 1705; RV64-NEXT: j .LBB23_5 1706; RV64-NEXT: .LBB23_4: 1707; RV64-NEXT: snez a1, a0 1708; RV64-NEXT: .LBB23_5: # %entry 1709; RV64-NEXT: neg a1, a1 1710; RV64-NEXT: and a0, a1, a0 1711; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload 1712; RV64-NEXT: .cfi_restore ra 1713; RV64-NEXT: addi sp, sp, 16 1714; RV64-NEXT: .cfi_def_cfa_offset 0 1715; RV64-NEXT: ret 1716entry: 1717 %conv = fptosi float %x to i128 1718 %0 = icmp slt i128 %conv, 18446744073709551616 1719 %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616 1720 %1 = icmp sgt i128 %spec.store.select, 0 1721 %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0 1722 %conv6 = trunc i128 %spec.store.select7 to i64 1723 ret i64 %conv6 1724} 1725 1726define i64 @stest_f16i64(half %x) { 1727; RV32-LABEL: stest_f16i64: 1728; RV32: # %bb.0: # %entry 1729; RV32-NEXT: addi sp, sp, -32 1730; RV32-NEXT: .cfi_def_cfa_offset 32 1731; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill 1732; RV32-NEXT: .cfi_offset ra, -4 1733; RV32-NEXT: call __extendhfsf2 1734; RV32-NEXT: addi a0, sp, 8 1735; RV32-NEXT: call __fixsfti 1736; RV32-NEXT: lw a3, 8(sp) 1737; RV32-NEXT: lw a1, 12(sp) 1738; RV32-NEXT: lw a2, 16(sp) 1739; RV32-NEXT: lw a4, 20(sp) 1740; RV32-NEXT: lui a0, 524288 1741; RV32-NEXT: addi a5, a0, -1 1742; RV32-NEXT: beq a1, a5, .LBB24_2 1743; RV32-NEXT: # %bb.1: # %entry 1744; RV32-NEXT: sltu a6, a1, a5 1745; RV32-NEXT: or a7, a2, a4 1746; RV32-NEXT: bnez a7, .LBB24_3 1747; RV32-NEXT: j .LBB24_4 1748; RV32-NEXT: .LBB24_2: 1749; RV32-NEXT: sltiu a6, a3, -1 1750; RV32-NEXT: or a7, a2, a4 1751; RV32-NEXT: beqz a7, .LBB24_4 1752; RV32-NEXT: .LBB24_3: # %entry 1753; RV32-NEXT: slti a6, a4, 0 1754; RV32-NEXT: .LBB24_4: # %entry 1755; RV32-NEXT: addi a7, a6, -1 1756; RV32-NEXT: neg t0, a6 1757; RV32-NEXT: bnez a6, .LBB24_6 1758; RV32-NEXT: # %bb.5: # %entry 1759; RV32-NEXT: mv a1, a5 1760; RV32-NEXT: .LBB24_6: # %entry 1761; RV32-NEXT: or a3, a7, a3 1762; RV32-NEXT: and a4, t0, a4 1763; RV32-NEXT: and a2, t0, a2 1764; RV32-NEXT: beq a1, a0, .LBB24_8 1765; RV32-NEXT: # %bb.7: # %entry 1766; RV32-NEXT: sltu a0, a0, a1 1767; RV32-NEXT: j .LBB24_9 1768; RV32-NEXT: .LBB24_8: 1769; RV32-NEXT: snez a0, a3 1770; RV32-NEXT: .LBB24_9: # %entry 1771; RV32-NEXT: and a2, a2, a4 1772; RV32-NEXT: li a5, -1 1773; RV32-NEXT: beq a2, a5, .LBB24_11 1774; RV32-NEXT: # %bb.10: # %entry 1775; RV32-NEXT: slti a0, a4, 0 1776; RV32-NEXT: xori a0, a0, 1 1777; RV32-NEXT: .LBB24_11: # %entry 1778; RV32-NEXT: bnez a0, .LBB24_13 1779; RV32-NEXT: # %bb.12: # %entry 1780; RV32-NEXT: lui a1, 524288 1781; RV32-NEXT: .LBB24_13: # %entry 1782; RV32-NEXT: neg a0, a0 1783; RV32-NEXT: and a0, a0, a3 1784; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload 1785; RV32-NEXT: .cfi_restore ra 1786; RV32-NEXT: addi sp, sp, 32 1787; RV32-NEXT: .cfi_def_cfa_offset 0 1788; RV32-NEXT: ret 1789; 1790; RV64-LABEL: stest_f16i64: 1791; RV64: # %bb.0: # %entry 1792; RV64-NEXT: addi sp, sp, -16 1793; RV64-NEXT: .cfi_def_cfa_offset 16 1794; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill 1795; 
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixsfti
; RV64-NEXT: li a2, -1
; RV64-NEXT: srli a3, a2, 1
; RV64-NEXT: beqz a1, .LBB24_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: slti a4, a1, 0
; RV64-NEXT: j .LBB24_3
; RV64-NEXT: .LBB24_2:
; RV64-NEXT: sltu a4, a0, a3
; RV64-NEXT: .LBB24_3: # %entry
; RV64-NEXT: neg a5, a4
; RV64-NEXT: and a5, a5, a1
; RV64-NEXT: bnez a4, .LBB24_5
; RV64-NEXT: # %bb.4: # %entry
; RV64-NEXT: mv a0, a3
; RV64-NEXT: .LBB24_5: # %entry
; RV64-NEXT: slli a1, a2, 63
; RV64-NEXT: beq a5, a2, .LBB24_7
; RV64-NEXT: # %bb.6: # %entry
; RV64-NEXT: slti a2, a5, 0
; RV64-NEXT: xori a2, a2, 1
; RV64-NEXT: beqz a2, .LBB24_8
; RV64-NEXT: j .LBB24_9
; RV64-NEXT: .LBB24_7:
; RV64-NEXT: sltu a2, a1, a0
; RV64-NEXT: bnez a2, .LBB24_9
; RV64-NEXT: .LBB24_8: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB24_9: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i128
  %0 = icmp slt i128 %conv, 9223372036854775807
  %spec.store.select = select i1 %0, i128 %conv, i128 9223372036854775807
  %1 = icmp sgt i128 %spec.store.select, -9223372036854775808
  %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 -9223372036854775808
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

define i64 @utesth_f16i64(half %x) {
; RV32-LABEL: utesth_f16i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: or a4, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: seqz a4, a4
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: neg a1, a0
; RV32-NEXT: and a0, a1, a3
; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i128
  %0 = icmp ult i128 %conv, 18446744073709551616
  %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}

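; In ustest_f16i64 below, the upper clamp bound 18446744073709551616 is
; exactly 2^64, which truncates to 0 in i64. The lowering therefore never
; needs to materialize an i128 bound; it only tests the sign and the high
; words of the __fixsfti result.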
define i64 @ustest_f16i64(half %x) {
; RV32-LABEL: ustest_f16i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: beqz a1, .LBB26_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a2, a1, 0
; RV32-NEXT: j .LBB26_3
; RV32-NEXT: .LBB26_2:
; RV32-NEXT: seqz a2, a0
; RV32-NEXT: .LBB26_3: # %entry
; RV32-NEXT: xori a3, a0, 1
; RV32-NEXT: or a3, a3, a1
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a2
; RV32-NEXT: neg a2, a3
; RV32-NEXT: bnez a3, .LBB26_5
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: li a0, 1
; RV32-NEXT: .LBB26_5: # %entry
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a4, 12(sp)
; RV32-NEXT: and a5, a2, a1
; RV32-NEXT: beqz a5, .LBB26_7
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: sgtz a1, a5
; RV32-NEXT: j .LBB26_8
; RV32-NEXT: .LBB26_7:
; RV32-NEXT: snez a1, a0
; RV32-NEXT: .LBB26_8: # %entry
; RV32-NEXT: and a4, a2, a4
; RV32-NEXT: or a0, a0, a5
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: bnez a0, .LBB26_10
; RV32-NEXT: # %bb.9:
; RV32-NEXT: or a0, a2, a4
; RV32-NEXT: snez a1, a0
; RV32-NEXT: .LBB26_10: # %entry
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a2
; RV32-NEXT: and a1, a1, a4
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixsfti
; RV64-NEXT: slti a2, a1, 1
; RV64-NEXT: blez a1, .LBB26_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: li a1, 1
; RV64-NEXT: .LBB26_2: # %entry
; RV64-NEXT: neg a2, a2
; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: beqz a1, .LBB26_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: sgtz a1, a1
; RV64-NEXT: j .LBB26_5
; RV64-NEXT: .LBB26_4:
; RV64-NEXT: snez a1, a0
; RV64-NEXT: .LBB26_5: # %entry
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i128
  %0 = icmp slt i128 %conv, 18446744073709551616
  %spec.store.select = select i1 %0, i128 %conv, i128 18446744073709551616
  %1 = icmp sgt i128 %spec.store.select, 0
  %spec.store.select7 = select i1 %1, i128 %spec.store.select, i128 0
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}



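; The remaining tests repeat the clamps above using the llvm.smin/llvm.smax
; and llvm.umin intrinsics (the _mm suffix) instead of icmp+select pairs;
; the expected lowering is largely the same as for the compare-and-select
; forms.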
; i32 saturate

define i32 @stest_f64i32_mm(double %x) {
; RV32IF-LABEL: stest_f64i32_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: lui a2, 524288
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: beqz a1, .LBB27_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: slti a4, a1, 0
; RV32IF-NEXT: j .LBB27_3
; RV32IF-NEXT: .LBB27_2:
; RV32IF-NEXT: sltu a4, a0, a3
; RV32IF-NEXT: .LBB27_3: # %entry
; RV32IF-NEXT: neg a5, a4
; RV32IF-NEXT: and a1, a5, a1
; RV32IF-NEXT: bnez a4, .LBB27_5
; RV32IF-NEXT: # %bb.4: # %entry
; RV32IF-NEXT: mv a0, a3
; RV32IF-NEXT: .LBB27_5: # %entry
; RV32IF-NEXT: li a3, -1
; RV32IF-NEXT: beq a1, a3, .LBB27_7
; RV32IF-NEXT: # %bb.6: # %entry
; RV32IF-NEXT: slti a1, a1, 0
; RV32IF-NEXT: xori a1, a1, 1
; RV32IF-NEXT: beqz a1, .LBB27_8
; RV32IF-NEXT: j .LBB27_9
; RV32IF-NEXT: .LBB27_7:
; RV32IF-NEXT: sltu a1, a2, a0
; RV32IF-NEXT: bnez a1, .LBB27_9
; RV32IF-NEXT: .LBB27_8: # %entry
; RV32IF-NEXT: lui a0, 524288
; RV32IF-NEXT: .LBB27_9: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i32_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: lui a1, 524288
; RV64IF-NEXT: addiw a2, a1, -1
; RV64IF-NEXT: blt a0, a2, .LBB27_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a2
; RV64IF-NEXT: .LBB27_2: # %entry
; RV64IF-NEXT: blt a1, a0, .LBB27_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 524288
; RV64IF-NEXT: .LBB27_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i32_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i32_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 524288
; RV64IFD-NEXT: addiw a2, a1, -1
; RV64IFD-NEXT: bge a0, a2, .LBB27_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: bge a1, a0, .LBB27_4
; RV64IFD-NEXT: .LBB27_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB27_3: # %entry
; RV64IFD-NEXT: mv a0, a2
; RV64IFD-NEXT: blt a1, a2, .LBB27_2
; RV64IFD-NEXT: .LBB27_4: # %entry
; RV64IFD-NEXT: lui a0, 524288
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f64i32_mm(double %x) {
; RV32IF-LABEL: utest_f64i32_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunsdfdi
; RV32IF-NEXT: seqz a1, a1
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: or a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: utest_f64i32_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: bltu a0, a1, .LBB28_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB28_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i32_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i32_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: bltu a0, a1, .LBB28_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB28_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i64
  %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295)
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f64i32_mm(double %x) {
; RV32IF-LABEL: ustest_f64i32_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfdi
; RV32IF-NEXT: bnez a1, .LBB29_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: li a2, 1
; RV32IF-NEXT: j .LBB29_3
; RV32IF-NEXT: .LBB29_2:
; RV32IF-NEXT: slti a2, a1, 1
; RV32IF-NEXT: .LBB29_3: # %entry
; RV32IF-NEXT: addi a3, a2, -1
; RV32IF-NEXT: neg a2, a2
; RV32IF-NEXT: or a0, a3, a0
; RV32IF-NEXT: and a1, a2, a1
; RV32IF-NEXT: slti a1, a1, 0
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i32_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfdi
; RV64IF-NEXT: li a1, -1
; RV64IF-NEXT: srli a1, a1, 32
; RV64IF-NEXT: blt a0, a1, .LBB29_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB29_2: # %entry
; RV64IF-NEXT: sgtz a1, a0
; RV64IF-NEXT: neg a1, a1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i32_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: seqz a1, a1
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i32_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: li a1, -1
; RV64IFD-NEXT: srli a1, a1, 32
; RV64IFD-NEXT: blt a0, a1, .LBB29_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB29_2: # %entry
; RV64IFD-NEXT: sgtz a1, a0
; RV64IFD-NEXT: neg a1, a1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

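; Where the source type is legal for the FP unit (f32 with +f, f64 with +d),
; the clamp needs no libcall: a single fcvt with the rtz rounding mode does
; the conversion, and the feq/seqz/addi -1/and sequence builds an all-ones
; or all-zero mask so that a NaN input yields 0 rather than the saturated
; fcvt result.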
define i32 @stest_f32i32_mm(float %x) {
; RV32-LABEL: stest_f32i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: bge a0, a2, .LBB30_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: bge a1, a0, .LBB30_4
; RV64-NEXT: .LBB30_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB30_3: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: blt a1, a2, .LBB30_2
; RV64-NEXT: .LBB30_4: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utest_f32i32_mm(float %x) {
; RV32-LABEL: utest_f32i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB31_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB31_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i64
  %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295)
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f32i32_mm(float %x) {
; RV32-LABEL: ustest_f32i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: feq.s a1, fa0, fa0
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: blt a0, a1, .LBB32_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB32_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @stest_f16i32_mm(half %x) {
; RV32-LABEL: stest_f16i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixsfdi
; RV32-NEXT: lui a2, 524288
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: beqz a1, .LBB33_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a1, 0
; RV32-NEXT: j .LBB33_3
; RV32-NEXT: .LBB33_2:
; RV32-NEXT: sltu a4, a0, a3
; RV32-NEXT: .LBB33_3: # %entry
; RV32-NEXT: neg a5, a4
; RV32-NEXT: and a1, a5, a1
; RV32-NEXT: bnez a4, .LBB33_5
; RV32-NEXT: # %bb.4: # %entry
; RV32-NEXT: mv a0, a3
; RV32-NEXT: .LBB33_5: # %entry
; RV32-NEXT: li a3, -1
; RV32-NEXT: beq a1, a3, .LBB33_7
; RV32-NEXT: # %bb.6: # %entry
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: xori a1, a1, 1
; RV32-NEXT: beqz a1, .LBB33_8
; RV32-NEXT: j .LBB33_9
; RV32-NEXT: .LBB33_7:
; RV32-NEXT: sltu a1, a2, a0
; RV32-NEXT: bnez a1, .LBB33_9
; RV32-NEXT: .LBB33_8: # %entry
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: .LBB33_9: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 524288
; RV64-NEXT: addiw a2, a1, -1
; RV64-NEXT: blt a0, a2, .LBB33_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a2
; RV64-NEXT: .LBB33_2: # %entry
; RV64-NEXT: blt a1, a0, .LBB33_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: lui a0, 524288
; RV64-NEXT: .LBB33_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 2147483647)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 -2147483648)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

define i32 @utesth_f16i32_mm(half %x) {
; RV32-LABEL: utesth_f16i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixunssfdi
; RV32-NEXT: seqz a1, a1
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: or a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: bltu a0, a1, .LBB34_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB34_2: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i64
  %spec.store.select = call i64 @llvm.umin.i64(i64 %conv, i64 4294967295)
  %conv6 = trunc i64 %spec.store.select to i32
  ret i32 %conv6
}

define i32 @ustest_f16i32_mm(half %x) {
; RV32-LABEL: ustest_f16i32_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: call __fixsfdi
; RV32-NEXT: bnez a1, .LBB35_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: li a2, 1
; RV32-NEXT: j .LBB35_3
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: slti a2, a1, 1
; RV32-NEXT: .LBB35_3: # %entry
; RV32-NEXT: addi a3, a2, -1
; RV32-NEXT: neg a2, a2
; RV32-NEXT: or a0, a3, a0
; RV32-NEXT: and a1, a2, a1
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i32_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: li a1, -1
; RV64-NEXT: srli a1, a1, 32
; RV64-NEXT: blt a0, a1, .LBB35_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB35_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i64
  %spec.store.select = call i64 @llvm.smin.i64(i64 %conv, i64 4294967295)
  %spec.store.select7 = call i64 @llvm.smax.i64(i64 %spec.store.select, i64 0)
  %conv6 = trunc i64 %spec.store.select7 to i32
  ret i32 %conv6
}

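; The i16 clamps below are computed in i32 and truncated. The bounds come
; from lui/addi(w): lui a1, 8 plus addi a1, a1, -1 materializes 32767,
; lui a1, 16 plus addi a1, a1, -1 materializes 65535, and lui a1, 1048568
; is the sign-extended -32768.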
; i16 saturate

define i16 @stest_f64i16_mm(double %x) {
; RV32IF-LABEL: stest_f64i16_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 8
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB36_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB36_2: # %entry
; RV32IF-NEXT: lui a1, 1048568
; RV32IF-NEXT: blt a1, a0, .LBB36_4
; RV32IF-NEXT: # %bb.3: # %entry
; RV32IF-NEXT: lui a0, 1048568
; RV32IF-NEXT: .LBB36_4: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i16_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 8
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB36_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB36_2: # %entry
; RV64IF-NEXT: lui a1, 1048568
; RV64IF-NEXT: blt a1, a0, .LBB36_4
; RV64IF-NEXT: # %bb.3: # %entry
; RV64IF-NEXT: lui a0, 1048568
; RV64IF-NEXT: .LBB36_4: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i16_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 8
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bge a0, a1, .LBB36_3
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: bge a1, a0, .LBB36_4
; RV32IFD-NEXT: .LBB36_2: # %entry
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB36_3: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: lui a1, 1048568
; RV32IFD-NEXT: blt a1, a0, .LBB36_2
; RV32IFD-NEXT: .LBB36_4: # %entry
; RV32IFD-NEXT: lui a0, 1048568
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i16_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 8
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bge a0, a1, .LBB36_3
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: bge a1, a0, .LBB36_4
; RV64IFD-NEXT: .LBB36_2: # %entry
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB36_3: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: lui a1, 1048568
; RV64IFD-NEXT: blt a1, a0, .LBB36_2
; RV64IFD-NEXT: .LBB36_4: # %entry
; RV64IFD-NEXT: lui a0, 1048568
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f64i16_mm(double %x) {
; RV32IF-LABEL: utest_f64i16_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixunsdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: bltu a0, a1, .LBB37_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB37_2: # %entry
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: utest_f64i16_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixunsdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: bltu a0, a1, .LBB37_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB37_2: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i16_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: bltu a0, a1, .LBB37_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: .LBB37_2: # %entry
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: utest_f64i16_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.wu.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: bltu a0, a1, .LBB37_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB37_2: # %entry
; RV64IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i32
  %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535)
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f64i16_mm(double %x) {
; RV32IF-LABEL: ustest_f64i16_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -16
; RV32IF-NEXT: .cfi_def_cfa_offset 16
; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: call __fixdfsi
; RV32IF-NEXT: lui a1, 16
; RV32IF-NEXT: addi a1, a1, -1
; RV32IF-NEXT: blt a0, a1, .LBB38_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: mv a0, a1
; RV32IF-NEXT: .LBB38_2: # %entry
; RV32IF-NEXT: sgtz a1, a0
; RV32IF-NEXT: neg a1, a1
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: ustest_f64i16_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfsi
; RV64IF-NEXT: lui a1, 16
; RV64IF-NEXT: addiw a1, a1, -1
; RV64IF-NEXT: blt a0, a1, .LBB38_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB38_2: # %entry
; RV64IF-NEXT: sgtz a1, a0
; RV64IF-NEXT: neg a1, a1
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i16_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV32IFD-NEXT: lui a1, 16
; RV32IFD-NEXT: addi a1, a1, -1
; RV32IFD-NEXT: blt a0, a1, .LBB38_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: mv a0, a1
; RV32IFD-NEXT: .LBB38_2: # %entry
; RV32IFD-NEXT: sgtz a1, a0
; RV32IFD-NEXT: neg a1, a1
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: ustest_f64i16_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.w.d a0, fa0, rtz
; RV64IFD-NEXT: lui a1, 16
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: blt a0, a1, .LBB38_2
; RV64IFD-NEXT: # %bb.1: # %entry
; RV64IFD-NEXT: mv a0, a1
; RV64IFD-NEXT: .LBB38_2: # %entry
; RV64IFD-NEXT: sgtz a1, a0
; RV64IFD-NEXT: neg a1, a1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f32i16_mm(float %x) {
; RV32-LABEL: stest_f32i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bge a0, a1, .LBB39_3
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: bge a1, a0, .LBB39_4
; RV32-NEXT: .LBB39_2: # %entry
; RV32-NEXT: ret
; RV32-NEXT: .LBB39_3: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: blt a1, a0, .LBB39_2
; RV32-NEXT: .LBB39_4: # %entry
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.w.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bge a0, a1, .LBB39_3
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: bge a1, a0, .LBB39_4
; RV64-NEXT: .LBB39_2: # %entry
; RV64-NEXT: ret
; RV64-NEXT: .LBB39_3: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: blt a1, a0, .LBB39_2
; RV64-NEXT: .LBB39_4: # %entry
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utest_f32i16_mm(float %x) {
; RV32-LABEL: utest_f32i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bltu a0, a1, .LBB40_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB40_2: # %entry
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.wu.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bltu a0, a1, .LBB40_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB40_2: # %entry
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i32
  %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535)
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

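; The ustest lowerings clamp the upper bound with a branch (blt against the
; materialized 65535) and the lower bound branchlessly: sgtz plus neg turns
; a positive result into an all-ones mask, and the final and zeroes out
; negative values.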
define i16 @ustest_f32i16_mm(float %x) {
; RV32-LABEL: ustest_f32i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB41_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB41_2: # %entry
; RV32-NEXT: sgtz a1, a0
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.w.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB41_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB41_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @stest_f16i16_mm(half %x) {
; RV32-LABEL: stest_f16i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 8
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB42_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB42_2: # %entry
; RV32-NEXT: lui a1, 1048568
; RV32-NEXT: blt a1, a0, .LBB42_4
; RV32-NEXT: # %bb.3: # %entry
; RV32-NEXT: lui a0, 1048568
; RV32-NEXT: .LBB42_4: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 8
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB42_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB42_2: # %entry
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: blt a1, a0, .LBB42_4
; RV64-NEXT: # %bb.3: # %entry
; RV64-NEXT: lui a0, 1048568
; RV64-NEXT: .LBB42_4: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 32767)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 -32768)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

define i16 @utesth_f16i16_mm(half %x) {
; RV32-LABEL: utesth_f16i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.wu.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: bltu a0, a1, .LBB43_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB43_2: # %entry
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.lu.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: bltu a0, a1, .LBB43_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB43_2: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i32
  %spec.store.select = call i32 @llvm.umin.i32(i32 %conv, i32 65535)
  %conv6 = trunc i32 %spec.store.select to i16
  ret i16 %conv6
}

define i16 @ustest_f16i16_mm(half %x) {
; RV32-LABEL: ustest_f16i16_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: fcvt.w.s a0, fa0, rtz
; RV32-NEXT: lui a1, 16
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: blt a0, a1, .LBB44_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB44_2: # %entry
; RV32-NEXT: sgtz a1, a0
; RV32-NEXT: neg a1, a1
; RV32-NEXT: and a0, a1, a0
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i16_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: lui a1, 16
; RV64-NEXT: addiw a1, a1, -1
; RV64-NEXT: blt a0, a1, .LBB44_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB44_2: # %entry
; RV64-NEXT: sgtz a1, a0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i32
  %spec.store.select = call i32 @llvm.smin.i32(i32 %conv, i32 65535)
  %spec.store.select7 = call i32 @llvm.smax.i32(i32 %spec.store.select, i32 0)
  %conv6 = trunc i32 %spec.store.select7 to i16
  ret i16 %conv6
}

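; The i64 clamps convert through the i128 __fix*ti libcalls. On RV32 the
; i128 result is returned indirectly (addi a0, sp, 8 passes the buffer that
; the four lw instructions read back); on RV64 it comes back in the a0/a1
; register pair.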
; i64 saturate

define i64 @stest_f64i64_mm(double %x) {
; RV32IF-LABEL: stest_f64i64_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -32
; RV32IF-NEXT: .cfi_def_cfa_offset 32
; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a3, 8(sp)
; RV32IF-NEXT: lw a1, 12(sp)
; RV32IF-NEXT: lw a2, 16(sp)
; RV32IF-NEXT: lw a4, 20(sp)
; RV32IF-NEXT: lui a0, 524288
; RV32IF-NEXT: addi a5, a0, -1
; RV32IF-NEXT: beq a1, a5, .LBB45_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: sltu a6, a1, a5
; RV32IF-NEXT: or a7, a2, a4
; RV32IF-NEXT: bnez a7, .LBB45_3
; RV32IF-NEXT: j .LBB45_4
; RV32IF-NEXT: .LBB45_2:
; RV32IF-NEXT: sltiu a6, a3, -1
; RV32IF-NEXT: or a7, a2, a4
; RV32IF-NEXT: beqz a7, .LBB45_4
; RV32IF-NEXT: .LBB45_3: # %entry
; RV32IF-NEXT: slti a6, a4, 0
; RV32IF-NEXT: .LBB45_4: # %entry
; RV32IF-NEXT: addi a7, a6, -1
; RV32IF-NEXT: neg t0, a6
; RV32IF-NEXT: bnez a6, .LBB45_6
; RV32IF-NEXT: # %bb.5: # %entry
; RV32IF-NEXT: mv a1, a5
; RV32IF-NEXT: .LBB45_6: # %entry
; RV32IF-NEXT: or a3, a7, a3
; RV32IF-NEXT: and a4, t0, a4
; RV32IF-NEXT: and a2, t0, a2
; RV32IF-NEXT: beq a1, a0, .LBB45_8
; RV32IF-NEXT: # %bb.7: # %entry
; RV32IF-NEXT: sltu a0, a0, a1
; RV32IF-NEXT: j .LBB45_9
; RV32IF-NEXT: .LBB45_8:
; RV32IF-NEXT: snez a0, a3
; RV32IF-NEXT: .LBB45_9: # %entry
; RV32IF-NEXT: and a2, a2, a4
; RV32IF-NEXT: li a5, -1
; RV32IF-NEXT: beq a2, a5, .LBB45_11
; RV32IF-NEXT: # %bb.10: # %entry
; RV32IF-NEXT: slti a0, a4, 0
; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: .LBB45_11: # %entry
; RV32IF-NEXT: bnez a0, .LBB45_13
; RV32IF-NEXT: # %bb.12: # %entry
; RV32IF-NEXT: lui a1, 524288
; RV32IF-NEXT: .LBB45_13: # %entry
; RV32IF-NEXT: neg a0, a0
; RV32IF-NEXT: and a0, a0, a3
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64IF-LABEL: stest_f64i64_mm:
; RV64IF: # %bb.0: # %entry
; RV64IF-NEXT: addi sp, sp, -16
; RV64IF-NEXT: .cfi_def_cfa_offset 16
; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: .cfi_offset ra, -8
; RV64IF-NEXT: call __fixdfti
; RV64IF-NEXT: li a2, -1
; RV64IF-NEXT: srli a3, a2, 1
; RV64IF-NEXT: beqz a1, .LBB45_2
; RV64IF-NEXT: # %bb.1: # %entry
; RV64IF-NEXT: slti a4, a1, 0
; RV64IF-NEXT: j .LBB45_3
; RV64IF-NEXT: .LBB45_2:
; RV64IF-NEXT: sltu a4, a0, a3
; RV64IF-NEXT: .LBB45_3: # %entry
; RV64IF-NEXT: neg a5, a4
; RV64IF-NEXT: and a5, a5, a1
; RV64IF-NEXT: bnez a4, .LBB45_5
; RV64IF-NEXT: # %bb.4: # %entry
; RV64IF-NEXT: mv a0, a3
; RV64IF-NEXT: .LBB45_5: # %entry
; RV64IF-NEXT: slli a1, a2, 63
; RV64IF-NEXT: beq a5, a2, .LBB45_7
; RV64IF-NEXT: # %bb.6: # %entry
; RV64IF-NEXT: slti a2, a5, 0
; RV64IF-NEXT: xori a2, a2, 1
; RV64IF-NEXT: beqz a2, .LBB45_8
; RV64IF-NEXT: j .LBB45_9
; RV64IF-NEXT: .LBB45_7:
; RV64IF-NEXT: sltu a2, a1, a0
; RV64IF-NEXT: bnez a2, .LBB45_9
; RV64IF-NEXT: .LBB45_8: # %entry
; RV64IF-NEXT: mv a0, a1
; RV64IF-NEXT: .LBB45_9: # %entry
; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IF-NEXT: .cfi_restore ra
; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: .cfi_def_cfa_offset 0
; RV64IF-NEXT: ret
;
; RV32IFD-LABEL: stest_f64i64_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -32
; RV32IFD-NEXT: .cfi_def_cfa_offset 32
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a3, 8(sp)
; RV32IFD-NEXT: lw a1, 12(sp)
; RV32IFD-NEXT: lw a2, 16(sp)
; RV32IFD-NEXT: lw a4, 20(sp)
; RV32IFD-NEXT: lui a0, 524288
; RV32IFD-NEXT: addi a5, a0, -1
; RV32IFD-NEXT: beq a1, a5, .LBB45_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: sltu a6, a1, a5
; RV32IFD-NEXT: or a7, a2, a4
; RV32IFD-NEXT: bnez a7, .LBB45_3
; RV32IFD-NEXT: j .LBB45_4
; RV32IFD-NEXT: .LBB45_2:
; RV32IFD-NEXT: sltiu a6, a3, -1
; RV32IFD-NEXT: or a7, a2, a4
; RV32IFD-NEXT: beqz a7, .LBB45_4
; RV32IFD-NEXT: .LBB45_3: # %entry
; RV32IFD-NEXT: slti a6, a4, 0
; RV32IFD-NEXT: .LBB45_4: # %entry
; RV32IFD-NEXT: addi a7, a6, -1
; RV32IFD-NEXT: neg t0, a6
; RV32IFD-NEXT: bnez a6, .LBB45_6
; RV32IFD-NEXT: # %bb.5: # %entry
; RV32IFD-NEXT: mv a1, a5
; RV32IFD-NEXT: .LBB45_6: # %entry
; RV32IFD-NEXT: or a3, a7, a3
; RV32IFD-NEXT: and a4, t0, a4
; RV32IFD-NEXT: and a2, t0, a2
; RV32IFD-NEXT: beq a1, a0, .LBB45_8
; RV32IFD-NEXT: # %bb.7: # %entry
; RV32IFD-NEXT: sltu a0, a0, a1
; RV32IFD-NEXT: j .LBB45_9
; RV32IFD-NEXT: .LBB45_8:
; RV32IFD-NEXT: snez a0, a3
; RV32IFD-NEXT: .LBB45_9: # %entry
; RV32IFD-NEXT: and a2, a2, a4
; RV32IFD-NEXT: li a5, -1
; RV32IFD-NEXT: beq a2, a5, .LBB45_11
; RV32IFD-NEXT: # %bb.10: # %entry
; RV32IFD-NEXT: slti a0, a4, 0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: .LBB45_11: # %entry
; RV32IFD-NEXT: bnez a0, .LBB45_13
; RV32IFD-NEXT: # %bb.12: # %entry
; RV32IFD-NEXT: lui a1, 524288
; RV32IFD-NEXT: .LBB45_13: # %entry
; RV32IFD-NEXT: neg a0, a0
; RV32IFD-NEXT: and a0, a0, a3
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
;
; RV64IFD-LABEL: stest_f64i64_mm:
; RV64IFD: # %bb.0: # %entry
; RV64IFD-NEXT: fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT: feq.d a1, fa0, fa0
; RV64IFD-NEXT: seqz a1, a1
; RV64IFD-NEXT: addi a1, a1, -1
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

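; On RV64 with +d, stest_f64i64_mm needs no libcall at all: fcvt.l.d already
; saturates to the i64 limits in hardware, so only the NaN-to-zero masking
; remains.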
define i64 @utest_f64i64_mm(double %x) {
; RV32IF-LABEL: utest_f64i64_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -32
; RV32IF-NEXT: .cfi_def_cfa_offset 32
; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixunsdfti
; RV32IF-NEXT: lw a0, 16(sp)
; RV32IF-NEXT: lw a1, 20(sp)
; RV32IF-NEXT: lw a2, 12(sp)
; RV32IF-NEXT: lw a3, 8(sp)
; RV32IF-NEXT: or a4, a1, a0
; RV32IF-NEXT: xori a0, a0, 1
; RV32IF-NEXT: seqz a4, a4
; RV32IF-NEXT: or a0, a0, a1
; RV32IF-NEXT: seqz a0, a0
; RV32IF-NEXT: addi a0, a0, -1
; RV32IF-NEXT: and a0, a0, a4
; RV32IF-NEXT: neg a1, a0
; RV32IF-NEXT: and a0, a1, a3
; RV32IF-NEXT: and a1, a1, a2
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64-LABEL: utest_f64i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __fixunsdfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; RV32IFD-LABEL: utest_f64i64_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -32
; RV32IFD-NEXT: .cfi_def_cfa_offset 32
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixunsdfti
; RV32IFD-NEXT: lw a0, 16(sp)
; RV32IFD-NEXT: lw a1, 20(sp)
; RV32IFD-NEXT: lw a2, 12(sp)
; RV32IFD-NEXT: lw a3, 8(sp)
; RV32IFD-NEXT: or a4, a1, a0
; RV32IFD-NEXT: xori a0, a0, 1
; RV32IFD-NEXT: seqz a4, a4
; RV32IFD-NEXT: or a0, a0, a1
; RV32IFD-NEXT: seqz a0, a0
; RV32IFD-NEXT: addi a0, a0, -1
; RV32IFD-NEXT: and a0, a0, a4
; RV32IFD-NEXT: neg a1, a0
; RV32IFD-NEXT: and a0, a1, a3
; RV32IFD-NEXT: and a1, a1, a2
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
entry:
  %conv = fptoui double %x to i128
  %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616)
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}

define i64 @ustest_f64i64_mm(double %x) {
; RV32IF-LABEL: ustest_f64i64_mm:
; RV32IF: # %bb.0: # %entry
; RV32IF-NEXT: addi sp, sp, -32
; RV32IF-NEXT: .cfi_def_cfa_offset 32
; RV32IF-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IF-NEXT: .cfi_offset ra, -4
; RV32IF-NEXT: mv a2, a1
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: addi a0, sp, 8
; RV32IF-NEXT: call __fixdfti
; RV32IF-NEXT: lw a0, 20(sp)
; RV32IF-NEXT: lw a1, 8(sp)
; RV32IF-NEXT: lw a2, 12(sp)
; RV32IF-NEXT: lw a3, 16(sp)
; RV32IF-NEXT: beqz a0, .LBB47_2
; RV32IF-NEXT: # %bb.1: # %entry
; RV32IF-NEXT: slti a4, a0, 0
; RV32IF-NEXT: j .LBB47_3
; RV32IF-NEXT: .LBB47_2:
; RV32IF-NEXT: seqz a4, a3
; RV32IF-NEXT: .LBB47_3: # %entry
; RV32IF-NEXT: xori a3, a3, 1
; RV32IF-NEXT: or a3, a3, a0
; RV32IF-NEXT: seqz a3, a3
; RV32IF-NEXT: addi a3, a3, -1
; RV32IF-NEXT: and a3, a3, a4
; RV32IF-NEXT: neg a3, a3
; RV32IF-NEXT: and a2, a3, a2
; RV32IF-NEXT: and a1, a3, a1
; RV32IF-NEXT: and a0, a3, a0
; RV32IF-NEXT: slti a0, a0, 0
; RV32IF-NEXT: addi a3, a0, -1
; RV32IF-NEXT: and a0, a3, a1
; RV32IF-NEXT: and a1, a3, a2
; RV32IF-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IF-NEXT: .cfi_restore ra
; RV32IF-NEXT: addi sp, sp, 32
; RV32IF-NEXT: .cfi_def_cfa_offset 0
; RV32IF-NEXT: ret
;
; RV64-LABEL: ustest_f64i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __fixdfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB47_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: li a2, 1
; RV64-NEXT: .LBB47_2: # %entry
; RV64-NEXT: slti a1, a1, 1
; RV64-NEXT: slti a2, a2, 0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
;
; RV32IFD-LABEL: ustest_f64i64_mm:
; RV32IFD: # %bb.0: # %entry
; RV32IFD-NEXT: addi sp, sp, -32
; RV32IFD-NEXT: .cfi_def_cfa_offset 32
; RV32IFD-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: .cfi_offset ra, -4
; RV32IFD-NEXT: addi a0, sp, 8
; RV32IFD-NEXT: call __fixdfti
; RV32IFD-NEXT: lw a0, 20(sp)
; RV32IFD-NEXT: lw a1, 8(sp)
; RV32IFD-NEXT: lw a2, 12(sp)
; RV32IFD-NEXT: lw a3, 16(sp)
; RV32IFD-NEXT: beqz a0, .LBB47_2
; RV32IFD-NEXT: # %bb.1: # %entry
; RV32IFD-NEXT: slti a4, a0, 0
; RV32IFD-NEXT: j .LBB47_3
; RV32IFD-NEXT: .LBB47_2:
; RV32IFD-NEXT: seqz a4, a3
; RV32IFD-NEXT: .LBB47_3: # %entry
; RV32IFD-NEXT: xori a3, a3, 1
; RV32IFD-NEXT: or a3, a3, a0
; RV32IFD-NEXT: seqz a3, a3
; RV32IFD-NEXT: addi a3, a3, -1
; RV32IFD-NEXT: and a3, a3, a4
; RV32IFD-NEXT: neg a3, a3
; RV32IFD-NEXT: and a2, a3, a2
; RV32IFD-NEXT: and a1, a3, a1
; RV32IFD-NEXT: and a0, a3, a0
; RV32IFD-NEXT: slti a0, a0, 0
; RV32IFD-NEXT: addi a3, a0, -1
; RV32IFD-NEXT: and a0, a3, a1
; RV32IFD-NEXT: and a1, a3, a2
; RV32IFD-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT: .cfi_restore ra
; RV32IFD-NEXT: addi sp, sp, 32
; RV32IFD-NEXT: .cfi_def_cfa_offset 0
; RV32IFD-NEXT: ret
entry:
  %conv = fptosi double %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

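; In the two preceding functions the 18446744073709551616 bound is 2^64:
; clamping against it and truncating to i64 keeps the low 64 bits when the
; value fits and yields 0 otherwise, so the lowering only has to inspect the
; high half of the __fixdfti/__fixunsdfti result.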
define i64 @stest_f32i64_mm(float %x) {
; RV32-LABEL: stest_f32i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a4, 20(sp)
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: addi a5, a0, -1
; RV32-NEXT: beq a1, a5, .LBB48_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB48_3
; RV32-NEXT: j .LBB48_4
; RV32-NEXT: .LBB48_2:
; RV32-NEXT: sltiu a6, a3, -1
; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: beqz a7, .LBB48_4
; RV32-NEXT: .LBB48_3: # %entry
; RV32-NEXT: slti a6, a4, 0
; RV32-NEXT: .LBB48_4: # %entry
; RV32-NEXT: addi a7, a6, -1
; RV32-NEXT: neg t0, a6
; RV32-NEXT: bnez a6, .LBB48_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB48_6: # %entry
; RV32-NEXT: or a3, a7, a3
; RV32-NEXT: and a4, t0, a4
; RV32-NEXT: and a2, t0, a2
; RV32-NEXT: beq a1, a0, .LBB48_8
; RV32-NEXT: # %bb.7: # %entry
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: j .LBB48_9
; RV32-NEXT: .LBB48_8:
; RV32-NEXT: snez a0, a3
; RV32-NEXT: .LBB48_9: # %entry
; RV32-NEXT: and a2, a2, a4
; RV32-NEXT: li a5, -1
; RV32-NEXT: beq a2, a5, .LBB48_11
; RV32-NEXT: # %bb.10: # %entry
; RV32-NEXT: slti a0, a4, 0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: .LBB48_11: # %entry
; RV32-NEXT: bnez a0, .LBB48_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB48_13: # %entry
; RV32-NEXT: neg a0, a0
; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f32i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: fcvt.l.s a0, fa0, rtz
; RV64-NEXT: feq.s a1, fa0, fa0
; RV64-NEXT: seqz a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

define i64 @utest_f32i64_mm(float %x) {
; RV32-LABEL: utest_f32i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: or a4, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: seqz a4, a4
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: neg a1, a0
; RV32-NEXT: and a0, a1, a3
; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i128
  %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616)
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}

define i64 @ustest_f32i64_mm(float %x) {
; RV32-LABEL: ustest_f32i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a1, 8(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 16(sp)
; RV32-NEXT: beqz a0, .LBB50_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a0, 0
; RV32-NEXT: j .LBB50_3
; RV32-NEXT: .LBB50_2:
; RV32-NEXT: seqz a4, a3
; RV32-NEXT: .LBB50_3: # %entry
; RV32-NEXT: xori a3, a3, 1
; RV32-NEXT: or a3, a3, a0
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: neg a3, a3
; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: and a1, a3, a1
; RV32-NEXT: and a0, a3, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: addi a3, a0, -1
; RV32-NEXT: and a0, a3, a1

define i64 @utest_f32i64_mm(float %x) {
; RV32-LABEL: utest_f32i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: or a4, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: seqz a4, a4
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: neg a1, a0
; RV32-NEXT: and a0, a1, a3
; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utest_f32i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui float %x to i128
  %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616)
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}

define i64 @ustest_f32i64_mm(float %x) {
; RV32-LABEL: ustest_f32i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a1, 8(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 16(sp)
; RV32-NEXT: beqz a0, .LBB50_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a0, 0
; RV32-NEXT: j .LBB50_3
; RV32-NEXT: .LBB50_2:
; RV32-NEXT: seqz a4, a3
; RV32-NEXT: .LBB50_3: # %entry
; RV32-NEXT: xori a3, a3, 1
; RV32-NEXT: or a3, a3, a0
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: neg a3, a3
; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: and a1, a3, a1
; RV32-NEXT: and a0, a3, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: addi a3, a0, -1
; RV32-NEXT: and a0, a3, a1
; RV32-NEXT: and a1, a3, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f32i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __fixsfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB50_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: li a2, 1
; RV64-NEXT: .LBB50_2: # %entry
; RV64-NEXT: slti a1, a1, 1
; RV64-NEXT: slti a2, a2, 0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi float %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}
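
; f16 -> i64 saturation. With only +f/+d enabled, half is first promoted
; through the __extendhfsf2 libcall; the i128 clamping that follows matches
; the f32 tests above.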

define i64 @stest_f16i64_mm(half %x) {
; RV32-LABEL: stest_f16i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: lw a1, 12(sp)
; RV32-NEXT: lw a2, 16(sp)
; RV32-NEXT: lw a4, 20(sp)
; RV32-NEXT: lui a0, 524288
; RV32-NEXT: addi a5, a0, -1
; RV32-NEXT: beq a1, a5, .LBB51_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: sltu a6, a1, a5
; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: bnez a7, .LBB51_3
; RV32-NEXT: j .LBB51_4
; RV32-NEXT: .LBB51_2:
; RV32-NEXT: sltiu a6, a3, -1
; RV32-NEXT: or a7, a2, a4
; RV32-NEXT: beqz a7, .LBB51_4
; RV32-NEXT: .LBB51_3: # %entry
; RV32-NEXT: slti a6, a4, 0
; RV32-NEXT: .LBB51_4: # %entry
; RV32-NEXT: addi a7, a6, -1
; RV32-NEXT: neg t0, a6
; RV32-NEXT: bnez a6, .LBB51_6
; RV32-NEXT: # %bb.5: # %entry
; RV32-NEXT: mv a1, a5
; RV32-NEXT: .LBB51_6: # %entry
; RV32-NEXT: or a3, a7, a3
; RV32-NEXT: and a4, t0, a4
; RV32-NEXT: and a2, t0, a2
; RV32-NEXT: beq a1, a0, .LBB51_8
; RV32-NEXT: # %bb.7: # %entry
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: j .LBB51_9
; RV32-NEXT: .LBB51_8:
; RV32-NEXT: snez a0, a3
; RV32-NEXT: .LBB51_9: # %entry
; RV32-NEXT: and a2, a2, a4
; RV32-NEXT: li a5, -1
; RV32-NEXT: beq a2, a5, .LBB51_11
; RV32-NEXT: # %bb.10: # %entry
; RV32-NEXT: slti a0, a4, 0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: .LBB51_11: # %entry
; RV32-NEXT: bnez a0, .LBB51_13
; RV32-NEXT: # %bb.12: # %entry
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: .LBB51_13: # %entry
; RV32-NEXT: neg a0, a0
; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: stest_f16i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixsfti
; RV64-NEXT: li a2, -1
; RV64-NEXT: srli a3, a2, 1
; RV64-NEXT: beqz a1, .LBB51_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: slti a4, a1, 0
; RV64-NEXT: j .LBB51_3
; RV64-NEXT: .LBB51_2:
; RV64-NEXT: sltu a4, a0, a3
; RV64-NEXT: .LBB51_3: # %entry
; RV64-NEXT: neg a5, a4
; RV64-NEXT: and a5, a5, a1
; RV64-NEXT: bnez a4, .LBB51_5
; RV64-NEXT: # %bb.4: # %entry
; RV64-NEXT: mv a0, a3
; RV64-NEXT: .LBB51_5: # %entry
; RV64-NEXT: slli a1, a2, 63
; RV64-NEXT: beq a5, a2, .LBB51_7
; RV64-NEXT: # %bb.6: # %entry
; RV64-NEXT: slti a2, a5, 0
; RV64-NEXT: xori a2, a2, 1
; RV64-NEXT: beqz a2, .LBB51_8
; RV64-NEXT: j .LBB51_9
; RV64-NEXT: .LBB51_7:
; RV64-NEXT: sltu a2, a1, a0
; RV64-NEXT: bnez a2, .LBB51_9
; RV64-NEXT: .LBB51_8: # %entry
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB51_9: # %entry
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 9223372036854775807)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 -9223372036854775808)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

define i64 @utesth_f16i64_mm(half %x) {
; RV32-LABEL: utesth_f16i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixunssfti
; RV32-NEXT: lw a0, 16(sp)
; RV32-NEXT: lw a1, 20(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 8(sp)
; RV32-NEXT: or a4, a1, a0
; RV32-NEXT: xori a0, a0, 1
; RV32-NEXT: seqz a4, a4
; RV32-NEXT: or a0, a0, a1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a4
; RV32-NEXT: neg a1, a0
; RV32-NEXT: and a0, a1, a3
; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: utesth_f16i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixunssfti
; RV64-NEXT: snez a1, a1
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptoui half %x to i128
  %spec.store.select = call i128 @llvm.umin.i128(i128 %conv, i128 18446744073709551616)
  %conv6 = trunc i128 %spec.store.select to i64
  ret i64 %conv6
}
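
; ustest clamps the signed conversion to [0, 2^64] before truncating.
; Values at either bound become 0 after the trunc, so on RV64 the result
; is simply the low half masked with (high half == 0), computed
; branchlessly from slti hi, 1 and slti min(hi, 1), 0.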

define i64 @ustest_f16i64_mm(half %x) {
; RV32-LABEL: ustest_f16i64_mm:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -32
; RV32-NEXT: .cfi_def_cfa_offset 32
; RV32-NEXT: sw ra, 28(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call __extendhfsf2
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: call __fixsfti
; RV32-NEXT: lw a0, 20(sp)
; RV32-NEXT: lw a1, 8(sp)
; RV32-NEXT: lw a2, 12(sp)
; RV32-NEXT: lw a3, 16(sp)
; RV32-NEXT: beqz a0, .LBB53_2
; RV32-NEXT: # %bb.1: # %entry
; RV32-NEXT: slti a4, a0, 0
; RV32-NEXT: j .LBB53_3
; RV32-NEXT: .LBB53_2:
; RV32-NEXT: seqz a4, a3
; RV32-NEXT: .LBB53_3: # %entry
; RV32-NEXT: xori a3, a3, 1
; RV32-NEXT: or a3, a3, a0
; RV32-NEXT: seqz a3, a3
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a3, a3, a4
; RV32-NEXT: neg a3, a3
; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: and a1, a3, a1
; RV32-NEXT: and a0, a3, a0
; RV32-NEXT: slti a0, a0, 0
; RV32-NEXT: addi a3, a0, -1
; RV32-NEXT: and a0, a3, a1
; RV32-NEXT: and a1, a3, a2
; RV32-NEXT: lw ra, 28(sp) # 4-byte Folded Reload
; RV32-NEXT: .cfi_restore ra
; RV32-NEXT: addi sp, sp, 32
; RV32-NEXT: .cfi_def_cfa_offset 0
; RV32-NEXT: ret
;
; RV64-LABEL: ustest_f16i64_mm:
; RV64: # %bb.0: # %entry
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call __extendhfsf2
; RV64-NEXT: call __fixsfti
; RV64-NEXT: mv a2, a1
; RV64-NEXT: blez a1, .LBB53_2
; RV64-NEXT: # %bb.1: # %entry
; RV64-NEXT: li a2, 1
; RV64-NEXT: .LBB53_2: # %entry
; RV64-NEXT: slti a1, a1, 1
; RV64-NEXT: slti a2, a2, 0
; RV64-NEXT: neg a1, a1
; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: addi a2, a2, -1
; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: .cfi_restore ra
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: .cfi_def_cfa_offset 0
; RV64-NEXT: ret
entry:
  %conv = fptosi half %x to i128
  %spec.store.select = call i128 @llvm.smin.i128(i128 %conv, i128 18446744073709551616)
  %spec.store.select7 = call i128 @llvm.smax.i128(i128 %spec.store.select, i128 0)
  %conv6 = trunc i128 %spec.store.select7 to i64
  ret i64 %conv6
}

declare i32 @llvm.smin.i32(i32, i32)
declare i32 @llvm.smax.i32(i32, i32)
declare i32 @llvm.umin.i32(i32, i32)
declare i64 @llvm.smin.i64(i64, i64)
declare i64 @llvm.smax.i64(i64, i64)
declare i64 @llvm.umin.i64(i64, i64)
declare i128 @llvm.smin.i128(i128, i128)
declare i128 @llvm.smax.i128(i128, i128)
declare i128 @llvm.umin.i128(i128, i128)