//===- ARMLegalizerInfo.cpp --------------------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMLegalizerInfo.h"
#include "ARMCallLowering.h"
#include "ARMSubtarget.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"

using namespace llvm;
using namespace LegalizeActions;

/// FIXME: The following static functions are SizeChangeStrategy functions
/// that are meant to temporarily mimic the behaviour of the old legalization
/// based on doubling/halving non-legal types as closely as possible. This is
/// not entirely possible as only legalizing the types that are exactly a power
/// of 2 times the size of the legal types would require specifying all those
/// sizes explicitly.
/// In practice, not specifying those isn't a problem, and the below functions
/// should disappear quickly as we add support for legalizing non-power-of-2
/// sized types further.
static void
addAndInterleaveWithUnsupported(LegalizerInfo::SizeAndActionsVec &result,
                                const LegalizerInfo::SizeAndActionsVec &v) {
  for (unsigned i = 0; i < v.size(); ++i) {
    result.push_back(v[i]);
    if (i + 1 < v[i].first && i + 1 < v.size() &&
        v[i + 1].first != v[i].first + 1)
      result.push_back({v[i].first + 1, Unsupported});
  }
}

static LegalizerInfo::SizeAndActionsVec
widen_8_16(const LegalizerInfo::SizeAndActionsVec &v) {
  assert(v.size() >= 1);
  assert(v[0].first > 17);
  LegalizerInfo::SizeAndActionsVec result = {{1, Unsupported},
                                             {8, WidenScalar},
                                             {9, Unsupported},
                                             {16, WidenScalar},
                                             {17, Unsupported}};
  addAndInterleaveWithUnsupported(result, v);
  auto Largest = result.back().first;
  result.push_back({Largest + 1, Unsupported});
  return result;
}

static bool AEABI(const ARMSubtarget &ST) {
  return ST.isTargetAEABI() || ST.isTargetGNUAEABI() || ST.isTargetMuslAEABI();
}

ARMLegalizerInfo::ARMLegalizerInfo(const ARMSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT p0 = LLT::pointer(0, 32);

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  if (ST.isThumb1Only()) {
    // Thumb1 is not supported yet.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  getActionDefinitionsBuilder({G_SEXT, G_ZEXT, G_ANYEXT})
      .legalForCartesianProduct({s32}, {s1, s8, s16});

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL, G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32);

  bool HasHWDivide = (!ST.isThumb() && ST.hasDivideInARMMode()) ||
                     (ST.isThumb() && ST.hasDivideInThumbMode());
  if (HasHWDivide)
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .legalFor({s32})
        .clampScalar(0, s32, s32);
  else
    getActionDefinitionsBuilder({G_SDIV, G_UDIV})
        .libcallFor({s32})
        .clampScalar(0, s32, s32);

  for (unsigned Op : {G_SREM, G_UREM}) {
    setLegalizeScalarToDifferentSizeStrategy(Op, 0, widen_8_16);
    if (HasHWDivide)
      setAction({Op, s32}, Lower);
    else if (AEABI(ST))
      setAction({Op, s32}, Custom);
    else
      setAction({Op, s32}, Libcall);
  }

  getActionDefinitionsBuilder(G_INTTOPTR).legalFor({{p0, s32}});
  getActionDefinitionsBuilder(G_PTRTOINT).legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32, p0})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s1}, {s32, p0})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT).legalForCartesianProduct({s32, p0},
                                                                 {s1});

  // We're keeping these builders around because we'll want to add support for
  // floating point to them.
  auto &LoadStoreBuilder =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemDesc({
              {s1, p0, 8, 8},
              {s8, p0, 8, 8},
              {s16, p0, 16, 8},
              {s32, p0, 32, 8},
              {p0, p0, 32, 8}});

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});
  getActionDefinitionsBuilder(G_GLOBAL_VALUE).legalFor({p0});

  auto &PhiBuilder =
      getActionDefinitionsBuilder(G_PHI)
          .legalFor({s32, p0})
          .minScalar(0, s32);

  getActionDefinitionsBuilder(G_GEP).legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRCOND).legalFor({s1});

  if (!ST.useSoftFloat() && ST.hasVFP2()) {
    getActionDefinitionsBuilder(
        {G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FCONSTANT, G_FNEG})
        .legalFor({s32, s64});

    LoadStoreBuilder.legalFor({{s64, p0}});
    PhiBuilder.legalFor({s64});

    getActionDefinitionsBuilder(G_FCMP).legalForCartesianProduct({s1},
                                                                 {s32, s64});

    getActionDefinitionsBuilder(G_MERGE_VALUES).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_UNMERGE_VALUES).legalFor({{s32, s64}});

    getActionDefinitionsBuilder(G_FPEXT).legalFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).legalFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .legalForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .legalForCartesianProduct({s32, s64}, {s32});
  } else {
    getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV})
        .libcallFor({s32, s64});

    LoadStoreBuilder.maxScalar(0, s32);

    for (auto Ty : {s32, s64})
      setAction({G_FNEG, Ty}, Lower);

    getActionDefinitionsBuilder(G_FCONSTANT).customFor({s32, s64});

    getActionDefinitionsBuilder(G_FCMP).customForCartesianProduct({s1},
                                                                  {s32, s64});

    if (AEABI(ST))
      setFCmpLibcallsAEABI();
    else
      setFCmpLibcallsGNU();

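    // Without hardware FP, extension/truncation and the FP <-> integer
    // conversions below also go through the runtime library.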
    getActionDefinitionsBuilder(G_FPEXT).libcallFor({{s64, s32}});
    getActionDefinitionsBuilder(G_FPTRUNC).libcallFor({{s32, s64}});

    getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
        .libcallForCartesianProduct({s32}, {s32, s64});
    getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
        .libcallForCartesianProduct({s32, s64}, {s32});
  }

  if (!ST.useSoftFloat() && ST.hasVFP4())
    getActionDefinitionsBuilder(G_FMA).legalFor({s32, s64});
  else
    getActionDefinitionsBuilder(G_FMA).libcallFor({s32, s64});

  getActionDefinitionsBuilder({G_FREM, G_FPOW}).libcallFor({s32, s64});

  if (ST.isThumb()) {
    // FIXME: merge with the code for non-Thumb.
    computeTables();
    verify(*ST.getInstrInfo());
    return;
  }

  if (ST.hasV5TOps()) {
    getActionDefinitionsBuilder(G_CTLZ)
        .legalFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  } else {
    getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
        .libcallFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
    getActionDefinitionsBuilder(G_CTLZ)
        .lowerFor({s32, s32})
        .clampScalar(1, s32, s32)
        .clampScalar(0, s32, s32);
  }

  computeTables();
  verify(*ST.getInstrInfo());
}

void ARMLegalizerInfo::setFCmpLibcallsAEABI() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F32, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F32, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F32, CmpInst::BAD_ICMP_PREDICATE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {
      {RTLIB::OGE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {
      {RTLIB::OLE_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {
      {RTLIB::OGT_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::OLT_F64, CmpInst::BAD_ICMP_PREDICATE}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {
      {RTLIB::OEQ_F64, CmpInst::BAD_ICMP_PREDICATE},
      {RTLIB::UO_F64, CmpInst::BAD_ICMP_PREDICATE}};
}

void ARMLegalizerInfo::setFCmpLibcallsGNU() {
  // FCMP_TRUE and FCMP_FALSE don't need libcalls, they should be
  // default-initialized.
  FCmp32Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp32Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F32, CmpInst::ICMP_EQ}};
  FCmp32Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F32, CmpInst::ICMP_SGE}};
  FCmp32Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F32, CmpInst::ICMP_SGT}};
  FCmp32Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SLE}};
  FCmp32Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F32, CmpInst::ICMP_NE}};
  FCmp32Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F32, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F32, CmpInst::ICMP_SLT}};
  FCmp32Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F32, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F32, CmpInst::ICMP_NE}};

  FCmp64Libcalls.resize(CmpInst::LAST_FCMP_PREDICATE + 1);
  FCmp64Libcalls[CmpInst::FCMP_OEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_OGE] = {{RTLIB::OGE_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_OGT] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_OLE] = {{RTLIB::OLE_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_OLT] = {{RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_ORD] = {{RTLIB::O_F64, CmpInst::ICMP_EQ}};
  FCmp64Libcalls[CmpInst::FCMP_UGE] = {{RTLIB::OLT_F64, CmpInst::ICMP_SGE}};
  FCmp64Libcalls[CmpInst::FCMP_UGT] = {{RTLIB::OLE_F64, CmpInst::ICMP_SGT}};
  FCmp64Libcalls[CmpInst::FCMP_ULE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SLE}};
  FCmp64Libcalls[CmpInst::FCMP_ULT] = {{RTLIB::OGE_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UNE] = {{RTLIB::UNE_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_UNO] = {{RTLIB::UO_F64, CmpInst::ICMP_NE}};
  FCmp64Libcalls[CmpInst::FCMP_ONE] = {{RTLIB::OGT_F64, CmpInst::ICMP_SGT},
                                       {RTLIB::OLT_F64, CmpInst::ICMP_SLT}};
  FCmp64Libcalls[CmpInst::FCMP_UEQ] = {{RTLIB::OEQ_F64, CmpInst::ICMP_EQ},
                                       {RTLIB::UO_F64, CmpInst::ICMP_NE}};
}

ARMLegalizerInfo::FCmpLibcallsList
ARMLegalizerInfo::getFCmpLibcalls(CmpInst::Predicate Predicate,
                                  unsigned Size) const {
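  // Each entry pairs a libcall with the integer predicate (if any) to apply to
  // its result; an empty list means the predicate needs no libcall at all
  // (FCMP_TRUE / FCMP_FALSE).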
  assert(CmpInst::isFPPredicate(Predicate) && "Unsupported FCmp predicate");
  if (Size == 32)
    return FCmp32Libcalls[Predicate];
  if (Size == 64)
    return FCmp64Libcalls[Predicate];
  llvm_unreachable("Unsupported size for FCmp predicate");
}

bool ARMLegalizerInfo::legalizeCustom(MachineInstr &MI,
                                      MachineRegisterInfo &MRI,
                                      MachineIRBuilder &MIRBuilder,
                                      GISelChangeObserver &Observer) const {
  using namespace TargetOpcode;

  MIRBuilder.setInstr(MI);
  LLVMContext &Ctx = MIRBuilder.getMF().getFunction().getContext();

  switch (MI.getOpcode()) {
  default:
    return false;
  case G_SREM:
  case G_UREM: {
    unsigned OriginalResult = MI.getOperand(0).getReg();
    auto Size = MRI.getType(OriginalResult).getSizeInBits();
    if (Size != 32)
      return false;

    auto Libcall =
        MI.getOpcode() == G_SREM ? RTLIB::SDIVREM_I32 : RTLIB::UDIVREM_I32;

    // Our divmod libcalls return a struct containing the quotient and the
    // remainder. We need to create a virtual register for it.
    Type *ArgTy = Type::getInt32Ty(Ctx);
    StructType *RetTy = StructType::get(Ctx, {ArgTy, ArgTy}, /* Packed */ true);
    auto RetVal = MRI.createGenericVirtualRegister(
        getLLTForType(*RetTy, MIRBuilder.getMF().getDataLayout()));

    auto Status = createLibcall(MIRBuilder, Libcall, {RetVal, RetTy},
                                {{MI.getOperand(1).getReg(), ArgTy},
                                 {MI.getOperand(2).getReg(), ArgTy}});
    if (Status != LegalizerHelper::Legalized)
      return false;

    // The remainder is the second result of divmod. Split the return value
    // into a new, unused register for the quotient and the destination of the
    // original instruction for the remainder.
    MIRBuilder.buildUnmerge(
        {MRI.createGenericVirtualRegister(LLT::scalar(32)), OriginalResult},
        RetVal);
    break;
  }
  case G_FCMP: {
    assert(MRI.getType(MI.getOperand(2).getReg()) ==
               MRI.getType(MI.getOperand(3).getReg()) &&
           "Mismatched operands for G_FCMP");
    auto OpSize = MRI.getType(MI.getOperand(2).getReg()).getSizeInBits();

    auto OriginalResult = MI.getOperand(0).getReg();
    auto Predicate =
        static_cast<CmpInst::Predicate>(MI.getOperand(1).getPredicate());
    auto Libcalls = getFCmpLibcalls(Predicate, OpSize);

    if (Libcalls.empty()) {
      assert((Predicate == CmpInst::FCMP_TRUE ||
              Predicate == CmpInst::FCMP_FALSE) &&
             "Predicate needs libcalls, but none specified");
      MIRBuilder.buildConstant(OriginalResult,
                               Predicate == CmpInst::FCMP_TRUE ? 1 : 0);
      MI.eraseFromParent();
      return true;
    }

    assert((OpSize == 32 || OpSize == 64) && "Unsupported operand size");
    auto *ArgTy = OpSize == 32 ? Type::getFloatTy(Ctx) : Type::getDoubleTy(Ctx);
    auto *RetTy = Type::getInt32Ty(Ctx);

    SmallVector<unsigned, 2> Results;
    for (auto Libcall : Libcalls) {
      auto LibcallResult = MRI.createGenericVirtualRegister(LLT::scalar(32));
      auto Status =
          createLibcall(MIRBuilder, Libcall.LibcallID, {LibcallResult, RetTy},
                        {{MI.getOperand(2).getReg(), ArgTy},
                         {MI.getOperand(3).getReg(), ArgTy}});

      if (Status != LegalizerHelper::Legalized)
        return false;

      auto ProcessedResult =
          Libcalls.size() == 1
              ? OriginalResult
              : MRI.createGenericVirtualRegister(MRI.getType(OriginalResult));

      // We have a result, but we need to transform it into a proper 1-bit 0 or
      // 1, taking into account the different peculiarities of the values
      // returned by the comparison functions.
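      // Entries registered with BAD_ICMP_PREDICATE (the AEABI helpers) already
      // produce a 0 or 1; the remaining entries (e.g. the GNU soft-float
      // helpers) return a value that still has to be compared against zero.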
      CmpInst::Predicate ResultPred = Libcall.Predicate;
      if (ResultPred == CmpInst::BAD_ICMP_PREDICATE) {
        // We have a nice 0 or 1, and we just need to truncate it back to 1 bit
        // to keep the types consistent.
        MIRBuilder.buildTrunc(ProcessedResult, LibcallResult);
      } else {
        // We need to compare against 0.
        assert(CmpInst::isIntPredicate(ResultPred) && "Unsupported predicate");
        auto Zero = MRI.createGenericVirtualRegister(LLT::scalar(32));
        MIRBuilder.buildConstant(Zero, 0);
        MIRBuilder.buildICmp(ResultPred, ProcessedResult, LibcallResult, Zero);
      }
      Results.push_back(ProcessedResult);
    }

    if (Results.size() != 1) {
      assert(Results.size() == 2 && "Unexpected number of results");
      MIRBuilder.buildOr(OriginalResult, Results[0], Results[1]);
    }
    break;
  }
  case G_FCONSTANT: {
    // Convert to integer constants, while preserving the binary
    // representation.
    auto AsInteger =
        MI.getOperand(1).getFPImm()->getValueAPF().bitcastToAPInt();
    MIRBuilder.buildConstant(MI.getOperand(0).getReg(),
                             *ConstantInt::get(Ctx, AsInteger));
    break;
  }
  }

  MI.eraseFromParent();
  return true;
}