; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc < %s -mtriple=riscv32 -global-isel | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -global-isel | FileCheck %s --check-prefix=RV64I

define i8 @scmp.8.8(i8 signext %x, i8 signext %y) nounwind {
; RV32I-LABEL: scmp.8.8:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slt a2, a1, a0
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.8.8:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    sub a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i8 @llvm.scmp(i8 %x, i8 %y)
  ret i8 %1
}

define i8 @scmp.8.16(i16 signext %x, i16 signext %y) nounwind {
; RV32I-LABEL: scmp.8.16:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slt a2, a1, a0
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.8.16:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    sub a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i8 @llvm.scmp(i16 %x, i16 %y)
  ret i8 %1
}

define i8 @scmp.8.32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: scmp.8.32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slt a2, a1, a0
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.8.32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    sub a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
  ret i8 %1
}

define i8 @scmp.8.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: scmp.8.64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beq a1, a3, .LBB3_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slt a4, a3, a1
; RV32I-NEXT:    slt a0, a1, a3
; RV32I-NEXT:    sub a0, a4, a0
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB3_2:
; RV32I-NEXT:    sltu a4, a2, a0
; RV32I-NEXT:    sltu a0, a0, a2
; RV32I-NEXT:    sub a0, a4, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.8.64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    sub a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i8 @llvm.scmp(i64 %x, i64 %y)
  ret i8 %1
}

define i32 @scmp.32.32(i32 %x, i32 %y) nounwind {
; RV32I-LABEL: scmp.32.32:
; RV32I:       # %bb.0:
; RV32I-NEXT:    slt a2, a1, a0
; RV32I-NEXT:    slt a0, a0, a1
; RV32I-NEXT:    sub a0, a2, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.32.32:
; RV64I:       # %bb.0:
; RV64I-NEXT:    sext.w a0, a0
; RV64I-NEXT:    sext.w a1, a1
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    subw a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.scmp(i32 %x, i32 %y)
  ret i32 %1
}

define i32 @scmp.32.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: scmp.32.64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beq a1, a3, .LBB5_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slt a4, a3, a1
; RV32I-NEXT:    slt a0, a1, a3
; RV32I-NEXT:    sub a0, a4, a0
; RV32I-NEXT:    ret
; RV32I-NEXT:  .LBB5_2:
; RV32I-NEXT:    sltu a4, a2, a0
; RV32I-NEXT:    sltu a0, a0, a2
; RV32I-NEXT:    sub a0, a4, a0
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.32.64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    subw a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i32 @llvm.scmp(i64 %x, i64 %y)
  ret i32 %1
}

define i64 @scmp.64.64(i64 %x, i64 %y) nounwind {
; RV32I-LABEL: scmp.64.64:
; RV32I:       # %bb.0:
; RV32I-NEXT:    beq a1, a3, .LBB6_2
; RV32I-NEXT:  # %bb.1:
; RV32I-NEXT:    slt a4, a3, a1
; RV32I-NEXT:    slt a1, a1, a3
; RV32I-NEXT:    j .LBB6_3
; RV32I-NEXT:  .LBB6_2:
; RV32I-NEXT:    sltu a4, a2, a0
; RV32I-NEXT:    sltu a1, a0, a2
; RV32I-NEXT:  .LBB6_3:
; RV32I-NEXT:    sub a0, a4, a1
; RV32I-NEXT:    sltu a1, a4, a1
; RV32I-NEXT:    neg a1, a1
; RV32I-NEXT:    ret
;
; RV64I-LABEL: scmp.64.64:
; RV64I:       # %bb.0:
; RV64I-NEXT:    slt a2, a1, a0
; RV64I-NEXT:    slt a0, a0, a1
; RV64I-NEXT:    sub a0, a2, a0
; RV64I-NEXT:    ret
  %1 = call i64 @llvm.scmp(i64 %x, i64 %y)
  ret i64 %1
}