//===- ARMFastISel.cpp - ARM FastISel implementation ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ARM-specific support for the FastISel class. Some
// of the target-specific code is generated by tablegen in the file
// ARMGenFastISel.inc, which is #included here.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMBaseRegisterInfo.h"
#include "ARMCallingConv.h"
#include "ARMConstantPoolValue.h"
#include "ARMISelLowering.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "Utils/ARMBaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcallUtil.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGenTypes/MachineValueType.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

namespace {

  // All possible address modes, plus some.
  struct Address {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType = RegBase;

    union {
      unsigned Reg;
      int FI;
    } Base;

    int Offset = 0;

    // Innocuous defaults for our address.
    Address() {
      Base.Reg = 0;
    }
  };

class ARMFastISel final : public FastISel {
  /// Subtarget - Keep a pointer to the ARMSubtarget around so that we can
  /// make the right decision when generating code for different targets.
  const ARMSubtarget *Subtarget;
  Module &M;
  const TargetMachine &TM;
  const TargetInstrInfo &TII;
  const TargetLowering &TLI;
  ARMFunctionInfo *AFI;

  // Convenience variables to avoid some queries.
  bool isThumb2;
  LLVMContext *Context;

  public:
    explicit ARMFastISel(FunctionLoweringInfo &funcInfo,
                         const TargetLibraryInfo *libInfo)
        : FastISel(funcInfo, libInfo),
          Subtarget(&funcInfo.MF->getSubtarget<ARMSubtarget>()),
          M(const_cast<Module &>(*funcInfo.Fn->getParent())),
          TM(funcInfo.MF->getTarget()), TII(*Subtarget->getInstrInfo()),
          TLI(*Subtarget->getTargetLowering()) {
      AFI = funcInfo.MF->getInfo<ARMFunctionInfo>();
      isThumb2 = AFI->isThumbFunction();
      Context = &funcInfo.Fn->getContext();
    }

  private:
    // Code from FastISel.cpp.

    unsigned fastEmitInst_r(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC, unsigned Op0);
    unsigned fastEmitInst_rr(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, unsigned Op1);
    unsigned fastEmitInst_ri(unsigned MachineInstOpcode,
                             const TargetRegisterClass *RC,
                             unsigned Op0, uint64_t Imm);
    unsigned fastEmitInst_i(unsigned MachineInstOpcode,
                            const TargetRegisterClass *RC,
                            uint64_t Imm);

    // Backend specific FastISel code.

    bool fastSelectInstruction(const Instruction *I) override;
    unsigned fastMaterializeConstant(const Constant *C) override;
    unsigned fastMaterializeAlloca(const AllocaInst *AI) override;
    bool tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
                             const LoadInst *LI) override;
    bool fastLowerArguments() override;

#include "ARMGenFastISel.inc"

    // Instruction selection routines.

    bool SelectLoad(const Instruction *I);
    bool SelectStore(const Instruction *I);
    bool SelectBranch(const Instruction *I);
    bool SelectIndirectBr(const Instruction *I);
    bool SelectCmp(const Instruction *I);
    bool SelectFPExt(const Instruction *I);
    bool SelectFPTrunc(const Instruction *I);
    bool SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode);
    bool SelectIToFP(const Instruction *I, bool isSigned);
    bool SelectFPToI(const Instruction *I, bool isSigned);
    bool SelectDiv(const Instruction *I, bool isSigned);
    bool SelectRem(const Instruction *I, bool isSigned);
    bool SelectCall(const Instruction *I, const char *IntrMemName);
    bool SelectIntrinsicCall(const IntrinsicInst &I);
    bool SelectSelect(const Instruction *I);
    bool SelectRet(const Instruction *I);
    bool SelectTrunc(const Instruction *I);
    bool SelectIntExt(const Instruction *I);
    bool SelectShift(const Instruction *I, ARM_AM::ShiftOpc ShiftTy);

    // Utility routines.

    bool isPositionIndependent() const;
    bool isTypeLegal(Type *Ty, MVT &VT);
    bool isLoadTypeLegal(Type *Ty, MVT &VT);
    bool ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
                    bool isZExt);
    bool ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
                     MaybeAlign Alignment = std::nullopt, bool isZExt = true,
                     bool allocReg = true);
    bool ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
                      MaybeAlign Alignment = std::nullopt);
    bool ARMComputeAddress(const Value *Obj, Address &Addr);
    void ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3);
    bool ARMIsMemCpySmall(uint64_t Len);
    bool ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
                               MaybeAlign Alignment);
    unsigned ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT, bool isZExt);
    unsigned ARMMaterializeFP(const ConstantFP *CFP, MVT VT);
    unsigned ARMMaterializeInt(const Constant *C, MVT VT);
    unsigned ARMMaterializeGV(const GlobalValue *GV, MVT VT);
    unsigned ARMMoveToFPReg(MVT VT, unsigned SrcReg);
    unsigned ARMMoveToIntReg(MVT VT, unsigned SrcReg);
    unsigned ARMSelectCallOp(bool UseReg);
    unsigned ARMLowerPICELF(const GlobalValue *GV, MVT VT);

    const TargetLowering *getTargetLowering() { return &TLI; }

    // Call handling routines.

    CCAssignFn *CCAssignFnForCall(CallingConv::ID CC,
                                  bool Return,
                                  bool isVarArg);
    bool ProcessCallArgs(SmallVectorImpl<Value*> &Args,
                         SmallVectorImpl<Register> &ArgRegs,
                         SmallVectorImpl<MVT> &ArgVTs,
                         SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
                         SmallVectorImpl<Register> &RegArgs,
                         CallingConv::ID CC,
                         unsigned &NumBytes,
                         bool isVarArg);
    unsigned getLibcallReg(const Twine &Name);
    bool FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
                    const Instruction *I, CallingConv::ID CC,
                    unsigned &NumBytes, bool isVarArg);
    bool ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call);

    // OptionalDef handling routines.

    bool isARMNEONPred(const MachineInstr *MI);
    bool DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR);
    const MachineInstrBuilder &AddOptionalDefs(const MachineInstrBuilder &MIB);
    void AddLoadStoreOperands(MVT VT, Address &Addr,
                              const MachineInstrBuilder &MIB,
                              MachineMemOperand::Flags Flags, bool useAM3);
};

} // end anonymous namespace

// DefinesOptionalPredicate - This is different from DefinesPredicate in that
// we don't care about implicit defs here, just places we'll need to add a
// default CCReg argument. Sets *CPSR to true if the optional def is CPSR
// rather than CCR.
bool ARMFastISel::DefinesOptionalPredicate(MachineInstr *MI, bool *CPSR) {
  if (!MI->hasOptionalDef())
    return false;

  // Look to see if our OptionalDef is defining CPSR or CCR.
  for (const MachineOperand &MO : MI->operands()) {
    if (!MO.isReg() || !MO.isDef()) continue;
    if (MO.getReg() == ARM::CPSR)
      *CPSR = true;
  }
  return true;
}

bool ARMFastISel::isARMNEONPred(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();

  // If this is a Thumb2 function, or the instruction is not in the NEON
  // domain, it is handled via isPredicable.
  if ((MCID.TSFlags & ARMII::DomainMask) != ARMII::DomainNEON ||
       AFI->isThumb2Function())
    return MI->isPredicable();

  for (const MCOperandInfo &opInfo : MCID.operands())
    if (opInfo.isPredicate())
      return true;

  return false;
}

// If the machine instruction is predicable, add the predicate operands; if it
// needs default CC operands, add those.
// TODO: If we want to support thumb1 then we'll need to deal with optional
// CPSR defs that need to be added before the remaining operands. See s_cc_out
// for a description of why.
const MachineInstrBuilder &
ARMFastISel::AddOptionalDefs(const MachineInstrBuilder &MIB) {
  MachineInstr *MI = &*MIB;

  // Do we use a predicate? Or are we a NEON instruction in ARM mode with a
  // predicate operand? In the latter case the instruction is not marked
  // predicable, but we add the predicate anyway.
  if (isARMNEONPred(MI))
    MIB.add(predOps(ARMCC::AL));

  // Do we have an optional def? CPSR is set to true iff the optional def is
  // CPSR; all other optional defs in ARM are the CCR register.
  bool CPSR = false;
  if (DefinesOptionalPredicate(MI, &CPSR))
    MIB.add(CPSR ? t1CondCodeOp() : condCodeOp());
  return MIB;
}

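// The fastEmitInst_* helpers below mirror the generic FastISel emitters, but
// route every instruction through AddOptionalDefs so the predicate and
// optional CC-def operands are appended. If the instruction has no explicit
// def, the result is copied out of its first implicit def instead.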
unsigned ARMFastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addReg(Op0));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                   .addReg(Op0));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, unsigned Op1) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operands are sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  Op1 = constrainOperandRegClass(II, Op1, 2);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addReg(Op1));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                   .addReg(Op0)
                   .addReg(Op1));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                      const TargetRegisterClass *RC,
                                      unsigned Op0, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  // Make sure the input operand is sufficiently constrained to be legal
  // for this instruction.
  Op0 = constrainOperandRegClass(II, Op0, 1);
  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II, ResultReg)
            .addReg(Op0)
            .addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                   .addReg(Op0)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}

unsigned ARMFastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1) {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II,
                            ResultReg).addImm(Imm));
  } else {
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
                   .addImm(Imm));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(TargetOpcode::COPY), ResultReg)
                        .addReg(II.implicit_defs()[0]));
  }
  return ResultReg;
}

// TODO: Don't worry about 64-bit now, but when this is fixed remove the
// checks from the various callers.
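// Move a 32-bit value from a core register into a single-precision VFP
// register using VMOVSR. f64 is not handled here and returns 0.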
unsigned ARMFastISel::ARMMoveToFPReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::f64) return 0;

  Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVSR), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

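// Move a 32-bit value from a single-precision VFP register back into a core
// register using VMOVRS. i64 is not handled here and returns 0.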
unsigned ARMFastISel::ARMMoveToIntReg(MVT VT, unsigned SrcReg) {
  if (VT == MVT::i64) return 0;

  Register MoveReg = createResultReg(TLI.getRegClassFor(VT));
  AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                          TII.get(ARM::VMOVRS), MoveReg)
                  .addReg(SrcReg));
  return MoveReg;
}

// Materialize a floating-point constant. If the value is representable as a
// VFP immediate, emit FCONSTS/FCONSTD directly; otherwise load it from the
// constant pool with VLDRS/VLDRD.
unsigned ARMFastISel::ARMMaterializeFP(const ConstantFP *CFP, MVT VT) {
  const APFloat Val = CFP->getValueAPF();
  bool is64bit = VT == MVT::f64;

  // This checks to see if we can use VFP3 instructions to materialize
  // a constant, otherwise we have to go through the constant pool.
  if (TLI.isFPImmLegal(Val, VT)) {
    int Imm;
    unsigned Opc;
    if (is64bit) {
      Imm = ARM_AM::getFP64Imm(Val);
      Opc = ARM::FCONSTD;
    } else {
      Imm = ARM_AM::getFP32Imm(Val);
      Opc = ARM::FCONSTS;
    }
    Register DestReg = createResultReg(TLI.getRegClassFor(VT));
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg).addImm(Imm));
    return DestReg;
  }

  // Require VFP2 for loading fp constants.
  if (!Subtarget->hasVFP2Base()) return 0;

  // MachineConstantPool wants an explicit alignment.
  Align Alignment = DL.getPrefTypeAlign(CFP->getType());
  unsigned Idx = MCP.getConstantPoolIndex(cast<Constant>(CFP), Alignment);
  Register DestReg = createResultReg(TLI.getRegClassFor(VT));
  unsigned Opc = is64bit ? ARM::VLDRD : ARM::VLDRS;

  // The extra reg is for addrmode5.
  AddOptionalDefs(
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
          .addConstantPoolIndex(Idx)
          .addReg(0));
  return DestReg;
}

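// Materialize an integer constant. Small unsigned values use MOVi16/t2MOVi16,
// encodable negative values use MVN, movw/movt is tried when available, and
// remaining 32-bit values fall back to a constant-pool load.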
unsigned ARMFastISel::ARMMaterializeInt(const Constant *C, MVT VT) {
  if (VT != MVT::i32 && VT != MVT::i16 && VT != MVT::i8 && VT != MVT::i1)
    return 0;

  // If we can do this in a single instruction without a constant pool entry
  // do so now.
  const ConstantInt *CI = cast<ConstantInt>(C);
  if (Subtarget->hasV6T2Ops() && isUInt<16>(CI->getZExtValue())) {
    unsigned Opc = isThumb2 ? ARM::t2MOVi16 : ARM::MOVi16;
    const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
      &ARM::GPRRegClass;
    Register ImmReg = createResultReg(RC);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ImmReg)
                    .addImm(CI->getZExtValue()));
    return ImmReg;
  }

  // Use MVN to emit negative constants.
  if (VT == MVT::i32 && Subtarget->hasV6T2Ops() && CI->isNegative()) {
    unsigned Imm = (unsigned)~(CI->getSExtValue());
    bool UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
      (ARM_AM::getSOImmVal(Imm) != -1);
    if (UseImm) {
      unsigned Opc = isThumb2 ? ARM::t2MVNi : ARM::MVNi;
      const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass :
                                                 &ARM::GPRRegClass;
      Register ImmReg = createResultReg(RC);
      AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                              TII.get(Opc), ImmReg)
                      .addImm(Imm));
      return ImmReg;
    }
  }

  unsigned ResultReg = 0;
  if (Subtarget->useMovt())
    ResultReg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());

  if (ResultReg)
    return ResultReg;

  // Load from constant pool.  For now 32-bit only.
  if (VT != MVT::i32)
    return 0;

  // MachineConstantPool wants an explicit alignment.
  Align Alignment = DL.getPrefTypeAlign(C->getType());
  unsigned Idx = MCP.getConstantPoolIndex(C, Alignment);
  ResultReg = createResultReg(TLI.getRegClassFor(VT));
  if (isThumb2)
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::t2LDRpci), ResultReg)
                      .addConstantPoolIndex(Idx));
  else {
    // The extra immediate is for addrmode2.
    ResultReg = constrainOperandRegClass(TII.get(ARM::LDRcp), ResultReg, 0);
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(ARM::LDRcp), ResultReg)
                      .addConstantPoolIndex(Idx)
                      .addImm(0));
  }
  return ResultReg;
}

bool ARMFastISel::isPositionIndependent() const {
  return TLI.isPositionIndependent();
}

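// Materialize the address of a global value. movw/movt is used when allowed;
// otherwise the address is loaded from the constant pool (with a PIC add or
// load when position independent). Indirect Mach-O and ELF GOT references get
// an extra load through the computed address.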
unsigned ARMFastISel::ARMMaterializeGV(const GlobalValue *GV, MVT VT) {
  // For now 32-bit only.
  if (VT != MVT::i32 || GV->isThreadLocal()) return 0;

  // ROPI/RWPI not currently supported.
  if (Subtarget->isROPI() || Subtarget->isRWPI())
    return 0;

  bool IsIndirect = Subtarget->isGVIndirectSymbol(GV);
  const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
                                           : &ARM::GPRRegClass;
  Register DestReg = createResultReg(RC);

  // FastISel TLS support on non-MachO is broken, punt to SelectionDAG.
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  bool IsThreadLocal = GVar && GVar->isThreadLocal();
  if (!Subtarget->isTargetMachO() && IsThreadLocal) return 0;

  bool IsPositionIndependent = isPositionIndependent();
  // Use movw+movt when possible, as it avoids constant pool entries.
  // Non-darwin targets only support static movt relocations in FastISel.
  if (Subtarget->useMovt() &&
      (Subtarget->isTargetMachO() || !IsPositionIndependent)) {
    unsigned Opc;
    unsigned char TF = 0;
    if (Subtarget->isTargetMachO())
      TF = ARMII::MO_NONLAZY;

    if (IsPositionIndependent)
      Opc = isThumb2 ? ARM::t2MOV_ga_pcrel : ARM::MOV_ga_pcrel;
    else
      Opc = isThumb2 ? ARM::t2MOVi32imm : ARM::MOVi32imm;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), DestReg).addGlobalAddress(GV, 0, TF));
  } else {
    // MachineConstantPool wants an explicit alignment.
    Align Alignment = DL.getPrefTypeAlign(GV->getType());

    if (Subtarget->isTargetELF() && IsPositionIndependent)
      return ARMLowerPICELF(GV, VT);

    // Grab index.
    unsigned PCAdj = IsPositionIndependent ? (Subtarget->isThumb() ? 4 : 8) : 0;
    unsigned Id = AFI->createPICLabelUId();
    ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(GV, Id,
                                                                ARMCP::CPValue,
                                                                PCAdj);
    unsigned Idx = MCP.getConstantPoolIndex(CPV, Alignment);

    // Load value.
    MachineInstrBuilder MIB;
    if (isThumb2) {
      unsigned Opc = IsPositionIndependent ? ARM::t2LDRpci_pic : ARM::t2LDRpci;
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc),
                    DestReg).addConstantPoolIndex(Idx);
      if (IsPositionIndependent)
        MIB.addImm(Id);
      AddOptionalDefs(MIB);
    } else {
      // The extra immediate is for addrmode2.
      DestReg = constrainOperandRegClass(TII.get(ARM::LDRcp), DestReg, 0);
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRcp), DestReg)
                .addConstantPoolIndex(Idx)
                .addImm(0);
      AddOptionalDefs(MIB);

      if (IsPositionIndependent) {
        unsigned Opc = IsIndirect ? ARM::PICLDR : ARM::PICADD;
        Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));

        MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
                                          MIMD, TII.get(Opc), NewDestReg)
                                  .addReg(DestReg)
                                  .addImm(Id);
        AddOptionalDefs(MIB);
        return NewDestReg;
      }
    }
  }

  if ((Subtarget->isTargetELF() && Subtarget->isGVInGOT(GV)) ||
      (Subtarget->isTargetMachO() && IsIndirect)) {
    MachineInstrBuilder MIB;
    Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
    if (isThumb2)
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::t2LDRi12), NewDestReg)
            .addReg(DestReg)
            .addImm(0);
    else
      MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                    TII.get(ARM::LDRi12), NewDestReg)
                .addReg(DestReg)
                .addImm(0);
    DestReg = NewDestReg;
    AddOptionalDefs(MIB);
  }

  return DestReg;
}

unsigned ARMFastISel::fastMaterializeConstant(const Constant *C) {
  EVT CEVT = TLI.getValueType(DL, C->getType(), true);

  // Only handle simple types.
  if (!CEVT.isSimple()) return 0;
  MVT VT = CEVT.getSimpleVT();

  if (const ConstantFP *CFP = dyn_cast<ConstantFP>(C))
    return ARMMaterializeFP(CFP, VT);
  else if (const GlobalValue *GV = dyn_cast<GlobalValue>(C))
    return ARMMaterializeGV(GV, VT);
  else if (isa<ConstantInt>(C))
    return ARMMaterializeInt(C, VT);

  return 0;
}

// TODO: unsigned ARMFastISel::TargetMaterializeFloatZero(const ConstantFP *CF);

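// Materialize the address of a static alloca by adding its frame index;
// dynamic allocas are not handled here.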
unsigned ARMFastISel::fastMaterializeAlloca(const AllocaInst *AI) {
  // Don't handle dynamic allocas.
  if (!FuncInfo.StaticAllocaMap.count(AI)) return 0;

  MVT VT;
  if (!isLoadTypeLegal(AI->getType(), VT)) return 0;

  DenseMap<const AllocaInst*, int>::iterator SI =
    FuncInfo.StaticAllocaMap.find(AI);

  // This will get lowered later into the correct offsets and registers
  // via rewriteXFrameIndex.
  if (SI != FuncInfo.StaticAllocaMap.end()) {
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    const TargetRegisterClass* RC = TLI.getRegClassFor(VT);
    Register ResultReg = createResultReg(RC);
    ResultReg = constrainOperandRegClass(TII.get(Opc), ResultReg, 0);

    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                            .addFrameIndex(SI->second)
                            .addImm(0));
    return ResultReg;
  }

  return 0;
}

bool ARMFastISel::isTypeLegal(Type *Ty, MVT &VT) {
  EVT evt = TLI.getValueType(DL, Ty, true);

  // Only handle simple types.
  if (evt == MVT::Other || !evt.isSimple()) return false;
  VT = evt.getSimpleVT();

  // Handle all legal types, i.e. a register that will directly hold this
  // value.
  return TLI.isTypeLegal(VT);
}

bool ARMFastISel::isLoadTypeLegal(Type *Ty, MVT &VT) {
  if (isTypeLegal(Ty, VT)) return true;

  // If this is a type that can be sign- or zero-extended to a basic operation,
  // go ahead and accept it now.
  if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
    return true;

  return false;
}

// Computes the address to get to an object.
bool ARMFastISel::ARMComputeAddress(const Value *Obj, Address &Addr) {
  // Some boilerplate from the X86 FastISel.
  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (PointerType *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  switch (Opcode) {
    default:
    break;
    case Instruction::BitCast:
      // Look through bitcasts.
      return ARMComputeAddress(U->getOperand(0), Addr);
    case Instruction::IntToPtr:
      // Look past no-op inttoptrs.
      if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
          TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::PtrToInt:
      // Look past no-op ptrtoints.
      if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
        return ARMComputeAddress(U->getOperand(0), Addr);
      break;
    case Instruction::GetElementPtr: {
      Address SavedAddr = Addr;
      int TmpOffset = Addr.Offset;

      // Iterate through the GEP folding the constants into offsets where
      // we can.
      gep_type_iterator GTI = gep_type_begin(U);
      for (User::const_op_iterator i = U->op_begin() + 1, e = U->op_end();
           i != e; ++i, ++GTI) {
        const Value *Op = *i;
        if (StructType *STy = GTI.getStructTypeOrNull()) {
          const StructLayout *SL = DL.getStructLayout(STy);
          unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
          TmpOffset += SL->getElementOffset(Idx);
        } else {
          uint64_t S = GTI.getSequentialElementStride(DL);
          while (true) {
            if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
              // Constant-offset addressing.
              TmpOffset += CI->getSExtValue() * S;
              break;
            }
            if (canFoldAddIntoGEP(U, Op)) {
              // A compatible add with a constant operand. Fold the constant.
              ConstantInt *CI =
              cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
              TmpOffset += CI->getSExtValue() * S;
              // Iterate on the other operand.
              Op = cast<AddOperator>(Op)->getOperand(0);
              continue;
            }
            // Unsupported
            goto unsupported_gep;
          }
        }
      }

      // Try to grab the base operand now.
      Addr.Offset = TmpOffset;
      if (ARMComputeAddress(U->getOperand(0), Addr)) return true;

      // We failed, restore everything and try the other options.
      Addr = SavedAddr;

      unsupported_gep:
      break;
    }
    case Instruction::Alloca: {
      const AllocaInst *AI = cast<AllocaInst>(Obj);
      DenseMap<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end()) {
        Addr.BaseType = Address::FrameIndexBase;
        Addr.Base.FI = SI->second;
        return true;
      }
      break;
    }
  }

  // Try to get this in a register if nothing else has worked.
  if (Addr.Base.Reg == 0) Addr.Base.Reg = getRegForValue(Obj);
  return Addr.Base.Reg != 0;
}

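// If the computed offset fits the addressing mode for this type, leave the
// address alone; otherwise fold base+offset into a register (converting a
// frame-index base to a plain register base first).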
void ARMFastISel::ARMSimplifyAddress(Address &Addr, MVT VT, bool useAM3) {
  bool needsLowering = false;
  switch (VT.SimpleTy) {
    default: llvm_unreachable("Unhandled load/store type!");
    case MVT::i1:
    case MVT::i8:
    case MVT::i16:
    case MVT::i32:
      if (!useAM3) {
        // Integer loads/stores handle 12-bit offsets.
        needsLowering = ((Addr.Offset & 0xfff) != Addr.Offset);
        // Handle negative offsets.
        if (needsLowering && isThumb2)
          needsLowering = !(Subtarget->hasV6T2Ops() && Addr.Offset < 0 &&
                            Addr.Offset > -256);
      } else {
        // ARM halfword load/stores and signed byte loads use +/-imm8 offsets.
        needsLowering = (Addr.Offset > 255 || Addr.Offset < -255);
      }
      break;
    case MVT::f32:
    case MVT::f64:
      // Floating point operands handle 8-bit offsets.
      needsLowering = ((Addr.Offset & 0xff) != Addr.Offset);
      break;
  }

  // If the base is a frame index and the offset needs to be simplified, put
  // the alloca address into a register, set the base type back to register,
  // and continue. This should almost never happen.
  if (needsLowering && Addr.BaseType == Address::FrameIndexBase) {
    const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
                                             : &ARM::GPRRegClass;
    Register ResultReg = createResultReg(RC);
    unsigned Opc = isThumb2 ? ARM::t2ADDri : ARM::ADDri;
    AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
                            TII.get(Opc), ResultReg)
                            .addFrameIndex(Addr.Base.FI)
                            .addImm(0));
    Addr.Base.Reg = ResultReg;
    Addr.BaseType = Address::RegBase;
  }

  // Since the offset is too large for the load/store instruction, get the
  // reg+offset into a register.
  if (needsLowering) {
    Addr.Base.Reg = fastEmit_ri_(MVT::i32, ISD::ADD, Addr.Base.Reg,
                                 Addr.Offset, MVT::i32);
    Addr.Offset = 0;
  }
}

8520b57cec5SDimitry Andric void ARMFastISel::AddLoadStoreOperands(MVT VT, Address &Addr,
8530b57cec5SDimitry Andric                                        const MachineInstrBuilder &MIB,
8540b57cec5SDimitry Andric                                        MachineMemOperand::Flags Flags,
8550b57cec5SDimitry Andric                                        bool useAM3) {
8560b57cec5SDimitry Andric   // addrmode5 output depends on the selection dag addressing dividing the
8570b57cec5SDimitry Andric   // offset by 4 that it then later multiplies. Do this here as well.
8580b57cec5SDimitry Andric   if (VT.SimpleTy == MVT::f32 || VT.SimpleTy == MVT::f64)
8590b57cec5SDimitry Andric     Addr.Offset /= 4;
8600b57cec5SDimitry Andric 
8610b57cec5SDimitry Andric   // Frame base works a bit differently. Handle it separately.
8620b57cec5SDimitry Andric   if (Addr.BaseType == Address::FrameIndexBase) {
8630b57cec5SDimitry Andric     int FI = Addr.Base.FI;
8640b57cec5SDimitry Andric     int Offset = Addr.Offset;
8650b57cec5SDimitry Andric     MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
8660b57cec5SDimitry Andric         MachinePointerInfo::getFixedStack(*FuncInfo.MF, FI, Offset), Flags,
8675ffd83dbSDimitry Andric         MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
8680b57cec5SDimitry Andric     // Now add the rest of the operands.
8690b57cec5SDimitry Andric     MIB.addFrameIndex(FI);
8700b57cec5SDimitry Andric 
8710b57cec5SDimitry Andric     // ARM halfword load/stores and signed byte loads need an additional
8720b57cec5SDimitry Andric     // operand.
8730b57cec5SDimitry Andric     if (useAM3) {
8740b57cec5SDimitry Andric       int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
8750b57cec5SDimitry Andric       MIB.addReg(0);
8760b57cec5SDimitry Andric       MIB.addImm(Imm);
8770b57cec5SDimitry Andric     } else {
8780b57cec5SDimitry Andric       MIB.addImm(Addr.Offset);
8790b57cec5SDimitry Andric     }
8800b57cec5SDimitry Andric     MIB.addMemOperand(MMO);
8810b57cec5SDimitry Andric   } else {
8820b57cec5SDimitry Andric     // Now add the rest of the operands.
8830b57cec5SDimitry Andric     MIB.addReg(Addr.Base.Reg);
8840b57cec5SDimitry Andric 
8850b57cec5SDimitry Andric     // ARM halfword load/stores and signed byte loads need an additional
8860b57cec5SDimitry Andric     // operand.
8870b57cec5SDimitry Andric     if (useAM3) {
8880b57cec5SDimitry Andric       int Imm = (Addr.Offset < 0) ? (0x100 | -Addr.Offset) : Addr.Offset;
8890b57cec5SDimitry Andric       MIB.addReg(0);
8900b57cec5SDimitry Andric       MIB.addImm(Imm);
8910b57cec5SDimitry Andric     } else {
8920b57cec5SDimitry Andric       MIB.addImm(Addr.Offset);
8930b57cec5SDimitry Andric     }
8940b57cec5SDimitry Andric   }
8950b57cec5SDimitry Andric   AddOptionalDefs(MIB);
8960b57cec5SDimitry Andric }
8970b57cec5SDimitry Andric 
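// Emit a load of type VT from Addr into ResultReg. Returns false when the
// type, alignment, or available subtarget features rule the load out here.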
8988bcb0991SDimitry Andric bool ARMFastISel::ARMEmitLoad(MVT VT, Register &ResultReg, Address &Addr,
89981ad6265SDimitry Andric                               MaybeAlign Alignment, bool isZExt,
90081ad6265SDimitry Andric                               bool allocReg) {
9010b57cec5SDimitry Andric   unsigned Opc;
9020b57cec5SDimitry Andric   bool useAM3 = false;
9030b57cec5SDimitry Andric   bool needVMOV = false;
9040b57cec5SDimitry Andric   const TargetRegisterClass *RC;
9050b57cec5SDimitry Andric   switch (VT.SimpleTy) {
9060b57cec5SDimitry Andric     // This is mostly going to be Neon/vector support.
9070b57cec5SDimitry Andric     default: return false;
9080b57cec5SDimitry Andric     case MVT::i1:
9090b57cec5SDimitry Andric     case MVT::i8:
9100b57cec5SDimitry Andric       if (isThumb2) {
9110b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
9120b57cec5SDimitry Andric           Opc = isZExt ? ARM::t2LDRBi8 : ARM::t2LDRSBi8;
9130b57cec5SDimitry Andric         else
9140b57cec5SDimitry Andric           Opc = isZExt ? ARM::t2LDRBi12 : ARM::t2LDRSBi12;
9150b57cec5SDimitry Andric       } else {
9160b57cec5SDimitry Andric         if (isZExt) {
9170b57cec5SDimitry Andric           Opc = ARM::LDRBi12;
9180b57cec5SDimitry Andric         } else {
9190b57cec5SDimitry Andric           Opc = ARM::LDRSB;
9200b57cec5SDimitry Andric           useAM3 = true;
9210b57cec5SDimitry Andric         }
9220b57cec5SDimitry Andric       }
9230b57cec5SDimitry Andric       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
9240b57cec5SDimitry Andric       break;
9250b57cec5SDimitry Andric     case MVT::i16:
92681ad6265SDimitry Andric       if (Alignment && *Alignment < Align(2) &&
92781ad6265SDimitry Andric           !Subtarget->allowsUnalignedMem())
9280b57cec5SDimitry Andric         return false;
9290b57cec5SDimitry Andric 
9300b57cec5SDimitry Andric       if (isThumb2) {
9310b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
9320b57cec5SDimitry Andric           Opc = isZExt ? ARM::t2LDRHi8 : ARM::t2LDRSHi8;
9330b57cec5SDimitry Andric         else
9340b57cec5SDimitry Andric           Opc = isZExt ? ARM::t2LDRHi12 : ARM::t2LDRSHi12;
9350b57cec5SDimitry Andric       } else {
9360b57cec5SDimitry Andric         Opc = isZExt ? ARM::LDRH : ARM::LDRSH;
9370b57cec5SDimitry Andric         useAM3 = true;
9380b57cec5SDimitry Andric       }
9390b57cec5SDimitry Andric       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
9400b57cec5SDimitry Andric       break;
9410b57cec5SDimitry Andric     case MVT::i32:
94281ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4) &&
94381ad6265SDimitry Andric           !Subtarget->allowsUnalignedMem())
9440b57cec5SDimitry Andric         return false;
9450b57cec5SDimitry Andric 
9460b57cec5SDimitry Andric       if (isThumb2) {
9470b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
9480b57cec5SDimitry Andric           Opc = ARM::t2LDRi8;
9490b57cec5SDimitry Andric         else
9500b57cec5SDimitry Andric           Opc = ARM::t2LDRi12;
9510b57cec5SDimitry Andric       } else {
9520b57cec5SDimitry Andric         Opc = ARM::LDRi12;
9530b57cec5SDimitry Andric       }
9540b57cec5SDimitry Andric       RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
9550b57cec5SDimitry Andric       break;
9560b57cec5SDimitry Andric     case MVT::f32:
9570b57cec5SDimitry Andric       if (!Subtarget->hasVFP2Base()) return false;
9580b57cec5SDimitry Andric       // Unaligned loads need special handling. Floats require word-alignment.
95981ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4)) {
9600b57cec5SDimitry Andric         needVMOV = true;
9610b57cec5SDimitry Andric         VT = MVT::i32;
9620b57cec5SDimitry Andric         Opc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
9630b57cec5SDimitry Andric         RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRnopcRegClass;
9640b57cec5SDimitry Andric       } else {
9650b57cec5SDimitry Andric         Opc = ARM::VLDRS;
9660b57cec5SDimitry Andric         RC = TLI.getRegClassFor(VT);
9670b57cec5SDimitry Andric       }
9680b57cec5SDimitry Andric       break;
9690b57cec5SDimitry Andric     case MVT::f64:
9700b57cec5SDimitry Andric       // Can load and store double precision even without FeatureFP64
9710b57cec5SDimitry Andric       if (!Subtarget->hasVFP2Base()) return false;
9720b57cec5SDimitry Andric       // FIXME: Unaligned loads need special handling.  Doublewords require
9730b57cec5SDimitry Andric       // word-alignment.
97481ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4))
9750b57cec5SDimitry Andric         return false;
9760b57cec5SDimitry Andric 
9770b57cec5SDimitry Andric       Opc = ARM::VLDRD;
9780b57cec5SDimitry Andric       RC = TLI.getRegClassFor(VT);
9790b57cec5SDimitry Andric       break;
9800b57cec5SDimitry Andric   }
9810b57cec5SDimitry Andric   // Simplify this down to something we can handle.
9820b57cec5SDimitry Andric   ARMSimplifyAddress(Addr, VT, useAM3);
9830b57cec5SDimitry Andric 
9840b57cec5SDimitry Andric   // Create the base instruction, then add the operands.
9850b57cec5SDimitry Andric   if (allocReg)
9860b57cec5SDimitry Andric     ResultReg = createResultReg(RC);
9870b57cec5SDimitry Andric   assert(ResultReg > 255 && "Expected an allocated virtual register.");
988bdd1243dSDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
9890b57cec5SDimitry Andric                                     TII.get(Opc), ResultReg);
9900b57cec5SDimitry Andric   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOLoad, useAM3);
9910b57cec5SDimitry Andric 
9920b57cec5SDimitry Andric   // If we had an unaligned load of a float we've converted it to a regular
9930b57cec5SDimitry Andric   // load.  Now we must move from the GPR to the FP register.
9940b57cec5SDimitry Andric   if (needVMOV) {
99504eeddc0SDimitry Andric     Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::f32));
996bdd1243dSDimitry Andric     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
9970b57cec5SDimitry Andric                             TII.get(ARM::VMOVSR), MoveReg)
9980b57cec5SDimitry Andric                     .addReg(ResultReg));
9990b57cec5SDimitry Andric     ResultReg = MoveReg;
10000b57cec5SDimitry Andric   }
10010b57cec5SDimitry Andric   return true;
10020b57cec5SDimitry Andric }
10030b57cec5SDimitry Andric 
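// Select an IR load: reject atomic and swifterror loads, compute the address,
// emit the load, and record the result register for this instruction.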
10040b57cec5SDimitry Andric bool ARMFastISel::SelectLoad(const Instruction *I) {
10050b57cec5SDimitry Andric   // Atomic loads need special handling.
10060b57cec5SDimitry Andric   if (cast<LoadInst>(I)->isAtomic())
10070b57cec5SDimitry Andric     return false;
10080b57cec5SDimitry Andric 
10090b57cec5SDimitry Andric   const Value *SV = I->getOperand(0);
10100b57cec5SDimitry Andric   if (TLI.supportSwiftError()) {
10110b57cec5SDimitry Andric     // Swifterror values can come from either a function parameter with the
10120b57cec5SDimitry Andric     // swifterror attribute or an alloca with the swifterror attribute.
10130b57cec5SDimitry Andric     if (const Argument *Arg = dyn_cast<Argument>(SV)) {
10140b57cec5SDimitry Andric       if (Arg->hasSwiftErrorAttr())
10150b57cec5SDimitry Andric         return false;
10160b57cec5SDimitry Andric     }
10170b57cec5SDimitry Andric 
10180b57cec5SDimitry Andric     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(SV)) {
10190b57cec5SDimitry Andric       if (Alloca->isSwiftError())
10200b57cec5SDimitry Andric         return false;
10210b57cec5SDimitry Andric     }
10220b57cec5SDimitry Andric   }
10230b57cec5SDimitry Andric 
10240b57cec5SDimitry Andric   // Verify we have a legal type before going any further.
10250b57cec5SDimitry Andric   MVT VT;
10260b57cec5SDimitry Andric   if (!isLoadTypeLegal(I->getType(), VT))
10270b57cec5SDimitry Andric     return false;
10280b57cec5SDimitry Andric 
10290b57cec5SDimitry Andric   // See if we can handle this address.
10300b57cec5SDimitry Andric   Address Addr;
10310b57cec5SDimitry Andric   if (!ARMComputeAddress(I->getOperand(0), Addr)) return false;
10320b57cec5SDimitry Andric 
10338bcb0991SDimitry Andric   Register ResultReg;
103481ad6265SDimitry Andric   if (!ARMEmitLoad(VT, ResultReg, Addr, cast<LoadInst>(I)->getAlign()))
10350b57cec5SDimitry Andric     return false;
10360b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
10370b57cec5SDimitry Andric   return true;
10380b57cec5SDimitry Andric }
10390b57cec5SDimitry Andric 
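// Emit a store of SrcReg with type VT to Addr, mirroring ARMEmitLoad's
// handling of alignment and addressing-mode constraints.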
10400b57cec5SDimitry Andric bool ARMFastISel::ARMEmitStore(MVT VT, unsigned SrcReg, Address &Addr,
104181ad6265SDimitry Andric                                MaybeAlign Alignment) {
10420b57cec5SDimitry Andric   unsigned StrOpc;
10430b57cec5SDimitry Andric   bool useAM3 = false;
10440b57cec5SDimitry Andric   switch (VT.SimpleTy) {
10450b57cec5SDimitry Andric     // This is mostly going to be Neon/vector support.
10460b57cec5SDimitry Andric     default: return false;
10470b57cec5SDimitry Andric     case MVT::i1: {
104804eeddc0SDimitry Andric       Register Res = createResultReg(isThumb2 ? &ARM::tGPRRegClass
10490b57cec5SDimitry Andric                                               : &ARM::GPRRegClass);
10500b57cec5SDimitry Andric       unsigned Opc = isThumb2 ? ARM::t2ANDri : ARM::ANDri;
10510b57cec5SDimitry Andric       SrcReg = constrainOperandRegClass(TII.get(Opc), SrcReg, 1);
1052bdd1243dSDimitry Andric       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
10530b57cec5SDimitry Andric                               TII.get(Opc), Res)
10540b57cec5SDimitry Andric                       .addReg(SrcReg).addImm(1));
10550b57cec5SDimitry Andric       SrcReg = Res;
1056bdd1243dSDimitry Andric       [[fallthrough]];
10570b57cec5SDimitry Andric     }
10580b57cec5SDimitry Andric     case MVT::i8:
10590b57cec5SDimitry Andric       if (isThumb2) {
10600b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
10610b57cec5SDimitry Andric           StrOpc = ARM::t2STRBi8;
10620b57cec5SDimitry Andric         else
10630b57cec5SDimitry Andric           StrOpc = ARM::t2STRBi12;
10640b57cec5SDimitry Andric       } else {
10650b57cec5SDimitry Andric         StrOpc = ARM::STRBi12;
10660b57cec5SDimitry Andric       }
10670b57cec5SDimitry Andric       break;
10680b57cec5SDimitry Andric     case MVT::i16:
106981ad6265SDimitry Andric       if (Alignment && *Alignment < Align(2) &&
107081ad6265SDimitry Andric           !Subtarget->allowsUnalignedMem())
10710b57cec5SDimitry Andric         return false;
10720b57cec5SDimitry Andric 
10730b57cec5SDimitry Andric       if (isThumb2) {
10740b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
10750b57cec5SDimitry Andric           StrOpc = ARM::t2STRHi8;
10760b57cec5SDimitry Andric         else
10770b57cec5SDimitry Andric           StrOpc = ARM::t2STRHi12;
10780b57cec5SDimitry Andric       } else {
10790b57cec5SDimitry Andric         StrOpc = ARM::STRH;
10800b57cec5SDimitry Andric         useAM3 = true;
10810b57cec5SDimitry Andric       }
10820b57cec5SDimitry Andric       break;
10830b57cec5SDimitry Andric     case MVT::i32:
108481ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4) &&
108581ad6265SDimitry Andric           !Subtarget->allowsUnalignedMem())
10860b57cec5SDimitry Andric         return false;
10870b57cec5SDimitry Andric 
10880b57cec5SDimitry Andric       if (isThumb2) {
10890b57cec5SDimitry Andric         if (Addr.Offset < 0 && Addr.Offset > -256 && Subtarget->hasV6T2Ops())
10900b57cec5SDimitry Andric           StrOpc = ARM::t2STRi8;
10910b57cec5SDimitry Andric         else
10920b57cec5SDimitry Andric           StrOpc = ARM::t2STRi12;
10930b57cec5SDimitry Andric       } else {
10940b57cec5SDimitry Andric         StrOpc = ARM::STRi12;
10950b57cec5SDimitry Andric       }
10960b57cec5SDimitry Andric       break;
10970b57cec5SDimitry Andric     case MVT::f32:
10980b57cec5SDimitry Andric       if (!Subtarget->hasVFP2Base()) return false;
10990b57cec5SDimitry Andric       // Unaligned stores need special handling. Floats require word-alignment.
110081ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4)) {
110104eeddc0SDimitry Andric         Register MoveReg = createResultReg(TLI.getRegClassFor(MVT::i32));
1102bdd1243dSDimitry Andric         AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
11030b57cec5SDimitry Andric                                 TII.get(ARM::VMOVRS), MoveReg)
11040b57cec5SDimitry Andric                         .addReg(SrcReg));
11050b57cec5SDimitry Andric         SrcReg = MoveReg;
11060b57cec5SDimitry Andric         VT = MVT::i32;
11070b57cec5SDimitry Andric         StrOpc = isThumb2 ? ARM::t2STRi12 : ARM::STRi12;
11080b57cec5SDimitry Andric       } else {
11090b57cec5SDimitry Andric         StrOpc = ARM::VSTRS;
11100b57cec5SDimitry Andric       }
11110b57cec5SDimitry Andric       break;
11120b57cec5SDimitry Andric     case MVT::f64:
11130b57cec5SDimitry Andric       // Can load and store double precision even without FeatureFP64
11140b57cec5SDimitry Andric       if (!Subtarget->hasVFP2Base()) return false;
11150b57cec5SDimitry Andric       // FIXME: Unaligned stores need special handling.  Doublewords require
11160b57cec5SDimitry Andric       // word-alignment.
111781ad6265SDimitry Andric       if (Alignment && *Alignment < Align(4))
11180b57cec5SDimitry Andric         return false;
11190b57cec5SDimitry Andric 
11200b57cec5SDimitry Andric       StrOpc = ARM::VSTRD;
11210b57cec5SDimitry Andric       break;
11220b57cec5SDimitry Andric   }
11230b57cec5SDimitry Andric   // Simplify this down to something we can handle.
11240b57cec5SDimitry Andric   ARMSimplifyAddress(Addr, VT, useAM3);
11250b57cec5SDimitry Andric 
11260b57cec5SDimitry Andric   // Create the base instruction, then add the operands.
11270b57cec5SDimitry Andric   SrcReg = constrainOperandRegClass(TII.get(StrOpc), SrcReg, 0);
1128bdd1243dSDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
11290b57cec5SDimitry Andric                                     TII.get(StrOpc))
11300b57cec5SDimitry Andric                             .addReg(SrcReg);
11310b57cec5SDimitry Andric   AddLoadStoreOperands(VT, Addr, MIB, MachineMemOperand::MOStore, useAM3);
11320b57cec5SDimitry Andric   return true;
11330b57cec5SDimitry Andric }
11340b57cec5SDimitry Andric 
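// Select an IR store: bail out on atomic and swifterror stores, materialize
// the stored value, compute the address, and emit the store.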
11350b57cec5SDimitry Andric bool ARMFastISel::SelectStore(const Instruction *I) {
11360b57cec5SDimitry Andric   Value *Op0 = I->getOperand(0);
11370b57cec5SDimitry Andric   unsigned SrcReg = 0;
11380b57cec5SDimitry Andric 
11390b57cec5SDimitry Andric   // Atomic stores need special handling.
11400b57cec5SDimitry Andric   if (cast<StoreInst>(I)->isAtomic())
11410b57cec5SDimitry Andric     return false;
11420b57cec5SDimitry Andric 
11430b57cec5SDimitry Andric   const Value *PtrV = I->getOperand(1);
11440b57cec5SDimitry Andric   if (TLI.supportSwiftError()) {
11450b57cec5SDimitry Andric     // Swifterror values can come from either a function parameter with the
11460b57cec5SDimitry Andric     // swifterror attribute or an alloca with the swifterror attribute.
11470b57cec5SDimitry Andric     if (const Argument *Arg = dyn_cast<Argument>(PtrV)) {
11480b57cec5SDimitry Andric       if (Arg->hasSwiftErrorAttr())
11490b57cec5SDimitry Andric         return false;
11500b57cec5SDimitry Andric     }
11510b57cec5SDimitry Andric 
11520b57cec5SDimitry Andric     if (const AllocaInst *Alloca = dyn_cast<AllocaInst>(PtrV)) {
11530b57cec5SDimitry Andric       if (Alloca->isSwiftError())
11540b57cec5SDimitry Andric         return false;
11550b57cec5SDimitry Andric     }
11560b57cec5SDimitry Andric   }
11570b57cec5SDimitry Andric 
11580b57cec5SDimitry Andric   // Verify we have a legal type before going any further.
11590b57cec5SDimitry Andric   MVT VT;
11600b57cec5SDimitry Andric   if (!isLoadTypeLegal(I->getOperand(0)->getType(), VT))
11610b57cec5SDimitry Andric     return false;
11620b57cec5SDimitry Andric 
11630b57cec5SDimitry Andric   // Get the value to be stored into a register.
11640b57cec5SDimitry Andric   SrcReg = getRegForValue(Op0);
11650b57cec5SDimitry Andric   if (SrcReg == 0) return false;
11660b57cec5SDimitry Andric 
11670b57cec5SDimitry Andric   // See if we can handle this address.
11680b57cec5SDimitry Andric   Address Addr;
11690b57cec5SDimitry Andric   if (!ARMComputeAddress(I->getOperand(1), Addr))
11700b57cec5SDimitry Andric     return false;
11710b57cec5SDimitry Andric 
117281ad6265SDimitry Andric   if (!ARMEmitStore(VT, SrcReg, Addr, cast<StoreInst>(I)->getAlign()))
11730b57cec5SDimitry Andric     return false;
11740b57cec5SDimitry Andric   return true;
11750b57cec5SDimitry Andric }
11760b57cec5SDimitry Andric 
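// Map an IR compare predicate to an ARM condition code. ARMCC::AL is used as
// the "unhandled" answer for predicates that would need two compares.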
11770b57cec5SDimitry Andric static ARMCC::CondCodes getComparePred(CmpInst::Predicate Pred) {
11780b57cec5SDimitry Andric   switch (Pred) {
11790b57cec5SDimitry Andric     // Needs two compares...
11800b57cec5SDimitry Andric     case CmpInst::FCMP_ONE:
11810b57cec5SDimitry Andric     case CmpInst::FCMP_UEQ:
11820b57cec5SDimitry Andric     default:
11830b57cec5SDimitry Andric       // AL is our "false" for now. The other two need more compares.
11840b57cec5SDimitry Andric       return ARMCC::AL;
11850b57cec5SDimitry Andric     case CmpInst::ICMP_EQ:
11860b57cec5SDimitry Andric     case CmpInst::FCMP_OEQ:
11870b57cec5SDimitry Andric       return ARMCC::EQ;
11880b57cec5SDimitry Andric     case CmpInst::ICMP_SGT:
11890b57cec5SDimitry Andric     case CmpInst::FCMP_OGT:
11900b57cec5SDimitry Andric       return ARMCC::GT;
11910b57cec5SDimitry Andric     case CmpInst::ICMP_SGE:
11920b57cec5SDimitry Andric     case CmpInst::FCMP_OGE:
11930b57cec5SDimitry Andric       return ARMCC::GE;
11940b57cec5SDimitry Andric     case CmpInst::ICMP_UGT:
11950b57cec5SDimitry Andric     case CmpInst::FCMP_UGT:
11960b57cec5SDimitry Andric       return ARMCC::HI;
11970b57cec5SDimitry Andric     case CmpInst::FCMP_OLT:
11980b57cec5SDimitry Andric       return ARMCC::MI;
11990b57cec5SDimitry Andric     case CmpInst::ICMP_ULE:
12000b57cec5SDimitry Andric     case CmpInst::FCMP_OLE:
12010b57cec5SDimitry Andric       return ARMCC::LS;
12020b57cec5SDimitry Andric     case CmpInst::FCMP_ORD:
12030b57cec5SDimitry Andric       return ARMCC::VC;
12040b57cec5SDimitry Andric     case CmpInst::FCMP_UNO:
12050b57cec5SDimitry Andric       return ARMCC::VS;
12060b57cec5SDimitry Andric     case CmpInst::FCMP_UGE:
12070b57cec5SDimitry Andric       return ARMCC::PL;
12080b57cec5SDimitry Andric     case CmpInst::ICMP_SLT:
12090b57cec5SDimitry Andric     case CmpInst::FCMP_ULT:
12100b57cec5SDimitry Andric       return ARMCC::LT;
12110b57cec5SDimitry Andric     case CmpInst::ICMP_SLE:
12120b57cec5SDimitry Andric     case CmpInst::FCMP_ULE:
12130b57cec5SDimitry Andric       return ARMCC::LE;
12140b57cec5SDimitry Andric     case CmpInst::FCMP_UNE:
12150b57cec5SDimitry Andric     case CmpInst::ICMP_NE:
12160b57cec5SDimitry Andric       return ARMCC::NE;
12170b57cec5SDimitry Andric     case CmpInst::ICMP_UGE:
12180b57cec5SDimitry Andric       return ARMCC::HS;
12190b57cec5SDimitry Andric     case CmpInst::ICMP_ULT:
12200b57cec5SDimitry Andric       return ARMCC::LO;
12210b57cec5SDimitry Andric   }
12220b57cec5SDimitry Andric }
12230b57cec5SDimitry Andric 
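// Select a branch: fold a same-block, single-use compare or truncate feeding
// the condition, handle constant conditions, and otherwise test the i1 value.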
12240b57cec5SDimitry Andric bool ARMFastISel::SelectBranch(const Instruction *I) {
12250b57cec5SDimitry Andric   const BranchInst *BI = cast<BranchInst>(I);
12260b57cec5SDimitry Andric   MachineBasicBlock *TBB = FuncInfo.MBBMap[BI->getSuccessor(0)];
12270b57cec5SDimitry Andric   MachineBasicBlock *FBB = FuncInfo.MBBMap[BI->getSuccessor(1)];
12280b57cec5SDimitry Andric 
12290b57cec5SDimitry Andric   // Simple branch support.
12300b57cec5SDimitry Andric 
12310b57cec5SDimitry Andric   // If we can, avoid recomputing the compare - redoing it could lead to wonky
12320b57cec5SDimitry Andric   // behavior.
12330b57cec5SDimitry Andric   if (const CmpInst *CI = dyn_cast<CmpInst>(BI->getCondition())) {
12340b57cec5SDimitry Andric     if (CI->hasOneUse() && (CI->getParent() == I->getParent())) {
12350b57cec5SDimitry Andric       // Get the compare predicate.
12360b57cec5SDimitry Andric       // Try to take advantage of fallthrough opportunities.
12370b57cec5SDimitry Andric       CmpInst::Predicate Predicate = CI->getPredicate();
12380b57cec5SDimitry Andric       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
12390b57cec5SDimitry Andric         std::swap(TBB, FBB);
12400b57cec5SDimitry Andric         Predicate = CmpInst::getInversePredicate(Predicate);
12410b57cec5SDimitry Andric       }
12420b57cec5SDimitry Andric 
12430b57cec5SDimitry Andric       ARMCC::CondCodes ARMPred = getComparePred(Predicate);
12440b57cec5SDimitry Andric 
12450b57cec5SDimitry Andric       // We may not handle every CC for now.
12460b57cec5SDimitry Andric       if (ARMPred == ARMCC::AL) return false;
12470b57cec5SDimitry Andric 
12480b57cec5SDimitry Andric       // Emit the compare.
12498bcb0991SDimitry Andric       if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
12500b57cec5SDimitry Andric         return false;
12510b57cec5SDimitry Andric 
12520b57cec5SDimitry Andric       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1253bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
12540b57cec5SDimitry Andric       .addMBB(TBB).addImm(ARMPred).addReg(ARM::CPSR);
12550b57cec5SDimitry Andric       finishCondBranch(BI->getParent(), TBB, FBB);
12560b57cec5SDimitry Andric       return true;
12570b57cec5SDimitry Andric     }
12580b57cec5SDimitry Andric   } else if (TruncInst *TI = dyn_cast<TruncInst>(BI->getCondition())) {
12590b57cec5SDimitry Andric     MVT SourceVT;
12600b57cec5SDimitry Andric     if (TI->hasOneUse() && TI->getParent() == I->getParent() &&
12610b57cec5SDimitry Andric         (isLoadTypeLegal(TI->getOperand(0)->getType(), SourceVT))) {
12620b57cec5SDimitry Andric       unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
126304eeddc0SDimitry Andric       Register OpReg = getRegForValue(TI->getOperand(0));
12640b57cec5SDimitry Andric       OpReg = constrainOperandRegClass(TII.get(TstOpc), OpReg, 0);
1265bdd1243dSDimitry Andric       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
12660b57cec5SDimitry Andric                               TII.get(TstOpc))
12670b57cec5SDimitry Andric                       .addReg(OpReg).addImm(1));
12680b57cec5SDimitry Andric 
12690b57cec5SDimitry Andric       unsigned CCMode = ARMCC::NE;
12700b57cec5SDimitry Andric       if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
12710b57cec5SDimitry Andric         std::swap(TBB, FBB);
12720b57cec5SDimitry Andric         CCMode = ARMCC::EQ;
12730b57cec5SDimitry Andric       }
12740b57cec5SDimitry Andric 
12750b57cec5SDimitry Andric       unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1276bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
12770b57cec5SDimitry Andric       .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
12780b57cec5SDimitry Andric 
12790b57cec5SDimitry Andric       finishCondBranch(BI->getParent(), TBB, FBB);
12800b57cec5SDimitry Andric       return true;
12810b57cec5SDimitry Andric     }
12820b57cec5SDimitry Andric   } else if (const ConstantInt *CI =
12830b57cec5SDimitry Andric              dyn_cast<ConstantInt>(BI->getCondition())) {
12840b57cec5SDimitry Andric     uint64_t Imm = CI->getZExtValue();
12850b57cec5SDimitry Andric     MachineBasicBlock *Target = (Imm == 0) ? FBB : TBB;
1286bdd1243dSDimitry Andric     fastEmitBranch(Target, MIMD.getDL());
12870b57cec5SDimitry Andric     return true;
12880b57cec5SDimitry Andric   }
12890b57cec5SDimitry Andric 
129004eeddc0SDimitry Andric   Register CmpReg = getRegForValue(BI->getCondition());
12910b57cec5SDimitry Andric   if (CmpReg == 0) return false;
12920b57cec5SDimitry Andric 
12930b57cec5SDimitry Andric   // We've been divorced from our compare!  Our block was split, and
12940b57cec5SDimitry Andric   // now our compare lives in a predecessor block.  We mustn't
12950b57cec5SDimitry Andric   // re-compare here, as the children of the compare aren't guaranteed
12960b57cec5SDimitry Andric   // live across the block boundary (we *could* check for this).
12970b57cec5SDimitry Andric   // Regardless, the compare has been done in the predecessor block,
12980b57cec5SDimitry Andric   // and it left a value for us in a virtual register.  Ergo, we test
12990b57cec5SDimitry Andric   // the one-bit value left in the virtual register.
13000b57cec5SDimitry Andric   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
13010b57cec5SDimitry Andric   CmpReg = constrainOperandRegClass(TII.get(TstOpc), CmpReg, 0);
13020b57cec5SDimitry Andric   AddOptionalDefs(
1303bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
13040b57cec5SDimitry Andric           .addReg(CmpReg)
13050b57cec5SDimitry Andric           .addImm(1));
13060b57cec5SDimitry Andric 
13070b57cec5SDimitry Andric   unsigned CCMode = ARMCC::NE;
13080b57cec5SDimitry Andric   if (FuncInfo.MBB->isLayoutSuccessor(TBB)) {
13090b57cec5SDimitry Andric     std::swap(TBB, FBB);
13100b57cec5SDimitry Andric     CCMode = ARMCC::EQ;
13110b57cec5SDimitry Andric   }
13120b57cec5SDimitry Andric 
13130b57cec5SDimitry Andric   unsigned BrOpc = isThumb2 ? ARM::t2Bcc : ARM::Bcc;
1314bdd1243dSDimitry Andric   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(BrOpc))
13150b57cec5SDimitry Andric                   .addMBB(TBB).addImm(CCMode).addReg(ARM::CPSR);
13160b57cec5SDimitry Andric   finishCondBranch(BI->getParent(), TBB, FBB);
13170b57cec5SDimitry Andric   return true;
13180b57cec5SDimitry Andric }
13190b57cec5SDimitry Andric 
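// Select an indirect branch through a register and add all listed successors.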
13200b57cec5SDimitry Andric bool ARMFastISel::SelectIndirectBr(const Instruction *I) {
132104eeddc0SDimitry Andric   Register AddrReg = getRegForValue(I->getOperand(0));
13220b57cec5SDimitry Andric   if (AddrReg == 0) return false;
13230b57cec5SDimitry Andric 
13240b57cec5SDimitry Andric   unsigned Opc = isThumb2 ? ARM::tBRIND : ARM::BX;
13250b57cec5SDimitry Andric   assert(isThumb2 || Subtarget->hasV4TOps());
13260b57cec5SDimitry Andric 
1327bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
13280b57cec5SDimitry Andric                           TII.get(Opc)).addReg(AddrReg));
13290b57cec5SDimitry Andric 
13300b57cec5SDimitry Andric   const IndirectBrInst *IB = cast<IndirectBrInst>(I);
13310b57cec5SDimitry Andric   for (const BasicBlock *SuccBB : IB->successors())
13320b57cec5SDimitry Andric     FuncInfo.MBB->addSuccessor(FuncInfo.MBBMap[SuccBB]);
13330b57cec5SDimitry Andric 
13340b57cec5SDimitry Andric   return true;
13350b57cec5SDimitry Andric }
13360b57cec5SDimitry Andric 
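// Emit a compare that sets CPSR (via FMSTAT for FP compares); narrow integers
// are extended to i32, and a suitable immediate second operand is encoded
// directly when possible.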
13370b57cec5SDimitry Andric bool ARMFastISel::ARMEmitCmp(const Value *Src1Value, const Value *Src2Value,
13388bcb0991SDimitry Andric                              bool isZExt) {
13390b57cec5SDimitry Andric   Type *Ty = Src1Value->getType();
13400b57cec5SDimitry Andric   EVT SrcEVT = TLI.getValueType(DL, Ty, true);
13410b57cec5SDimitry Andric   if (!SrcEVT.isSimple()) return false;
13420b57cec5SDimitry Andric   MVT SrcVT = SrcEVT.getSimpleVT();
13430b57cec5SDimitry Andric 
13440b57cec5SDimitry Andric   if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
13450b57cec5SDimitry Andric     return false;
13460b57cec5SDimitry Andric 
13470b57cec5SDimitry Andric   if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
13480b57cec5SDimitry Andric     return false;
13490b57cec5SDimitry Andric 
13500b57cec5SDimitry Andric   // Check to see if the 2nd operand is a constant that we can encode directly
13510b57cec5SDimitry Andric   // in the compare.
13520b57cec5SDimitry Andric   int Imm = 0;
13530b57cec5SDimitry Andric   bool UseImm = false;
13540b57cec5SDimitry Andric   bool isNegativeImm = false;
13550b57cec5SDimitry Andric   // FIXME: At -O0 we don't have anything that canonicalizes operand order.
13560b57cec5SDimitry Andric   // Thus, Src1Value may be a ConstantInt, but we're missing it.
13570b57cec5SDimitry Andric   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(Src2Value)) {
13580b57cec5SDimitry Andric     if (SrcVT == MVT::i32 || SrcVT == MVT::i16 || SrcVT == MVT::i8 ||
13590b57cec5SDimitry Andric         SrcVT == MVT::i1) {
13600b57cec5SDimitry Andric       const APInt &CIVal = ConstInt->getValue();
13610b57cec5SDimitry Andric       Imm = (isZExt) ? (int)CIVal.getZExtValue() : (int)CIVal.getSExtValue();
13620b57cec5SDimitry Andric       // For INT_MIN/LONG_MIN (i.e., 0x80000000) we need to use a cmp, rather
13630b57cec5SDimitry Andric       // than a cmn, because there is no way to represent 2147483648 as a
13640b57cec5SDimitry Andric       // signed 32-bit int.
13650b57cec5SDimitry Andric       if (Imm < 0 && Imm != (int)0x80000000) {
13660b57cec5SDimitry Andric         isNegativeImm = true;
13670b57cec5SDimitry Andric         Imm = -Imm;
13680b57cec5SDimitry Andric       }
13690b57cec5SDimitry Andric       UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
13700b57cec5SDimitry Andric         (ARM_AM::getSOImmVal(Imm) != -1);
13710b57cec5SDimitry Andric     }
13720b57cec5SDimitry Andric   } else if (const ConstantFP *ConstFP = dyn_cast<ConstantFP>(Src2Value)) {
13730b57cec5SDimitry Andric     if (SrcVT == MVT::f32 || SrcVT == MVT::f64)
13740b57cec5SDimitry Andric       if (ConstFP->isZero() && !ConstFP->isNegative())
13750b57cec5SDimitry Andric         UseImm = true;
13760b57cec5SDimitry Andric   }
13770b57cec5SDimitry Andric 
13780b57cec5SDimitry Andric   unsigned CmpOpc;
13790b57cec5SDimitry Andric   bool isICmp = true;
13800b57cec5SDimitry Andric   bool needsExt = false;
13810b57cec5SDimitry Andric   switch (SrcVT.SimpleTy) {
13820b57cec5SDimitry Andric     default: return false;
13830b57cec5SDimitry Andric     // TODO: Verify compares.
13840b57cec5SDimitry Andric     case MVT::f32:
13850b57cec5SDimitry Andric       isICmp = false;
13860b57cec5SDimitry Andric       CmpOpc = UseImm ? ARM::VCMPZS : ARM::VCMPS;
13870b57cec5SDimitry Andric       break;
13880b57cec5SDimitry Andric     case MVT::f64:
13890b57cec5SDimitry Andric       isICmp = false;
13900b57cec5SDimitry Andric       CmpOpc = UseImm ? ARM::VCMPZD : ARM::VCMPD;
13910b57cec5SDimitry Andric       break;
13920b57cec5SDimitry Andric     case MVT::i1:
13930b57cec5SDimitry Andric     case MVT::i8:
13940b57cec5SDimitry Andric     case MVT::i16:
13950b57cec5SDimitry Andric       needsExt = true;
1396bdd1243dSDimitry Andric       [[fallthrough]];
13970b57cec5SDimitry Andric     case MVT::i32:
13980b57cec5SDimitry Andric       if (isThumb2) {
13990b57cec5SDimitry Andric         if (!UseImm)
14000b57cec5SDimitry Andric           CmpOpc = ARM::t2CMPrr;
14010b57cec5SDimitry Andric         else
14020b57cec5SDimitry Andric           CmpOpc = isNegativeImm ? ARM::t2CMNri : ARM::t2CMPri;
14030b57cec5SDimitry Andric       } else {
14040b57cec5SDimitry Andric         if (!UseImm)
14050b57cec5SDimitry Andric           CmpOpc = ARM::CMPrr;
14060b57cec5SDimitry Andric         else
14070b57cec5SDimitry Andric           CmpOpc = isNegativeImm ? ARM::CMNri : ARM::CMPri;
14080b57cec5SDimitry Andric       }
14090b57cec5SDimitry Andric       break;
14100b57cec5SDimitry Andric   }
14110b57cec5SDimitry Andric 
141204eeddc0SDimitry Andric   Register SrcReg1 = getRegForValue(Src1Value);
14130b57cec5SDimitry Andric   if (SrcReg1 == 0) return false;
14140b57cec5SDimitry Andric 
14150b57cec5SDimitry Andric   unsigned SrcReg2 = 0;
14160b57cec5SDimitry Andric   if (!UseImm) {
14170b57cec5SDimitry Andric     SrcReg2 = getRegForValue(Src2Value);
14180b57cec5SDimitry Andric     if (SrcReg2 == 0) return false;
14190b57cec5SDimitry Andric   }
14200b57cec5SDimitry Andric 
14210b57cec5SDimitry Andric   // We have i1, i8, or i16; we need to either zero extend or sign extend.
14220b57cec5SDimitry Andric   if (needsExt) {
14230b57cec5SDimitry Andric     SrcReg1 = ARMEmitIntExt(SrcVT, SrcReg1, MVT::i32, isZExt);
14240b57cec5SDimitry Andric     if (SrcReg1 == 0) return false;
14250b57cec5SDimitry Andric     if (!UseImm) {
14260b57cec5SDimitry Andric       SrcReg2 = ARMEmitIntExt(SrcVT, SrcReg2, MVT::i32, isZExt);
14270b57cec5SDimitry Andric       if (SrcReg2 == 0) return false;
14280b57cec5SDimitry Andric     }
14290b57cec5SDimitry Andric   }
14300b57cec5SDimitry Andric 
14310b57cec5SDimitry Andric   const MCInstrDesc &II = TII.get(CmpOpc);
14320b57cec5SDimitry Andric   SrcReg1 = constrainOperandRegClass(II, SrcReg1, 0);
14330b57cec5SDimitry Andric   if (!UseImm) {
14340b57cec5SDimitry Andric     SrcReg2 = constrainOperandRegClass(II, SrcReg2, 1);
1435bdd1243dSDimitry Andric     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
14360b57cec5SDimitry Andric                     .addReg(SrcReg1).addReg(SrcReg2));
14370b57cec5SDimitry Andric   } else {
14380b57cec5SDimitry Andric     MachineInstrBuilder MIB;
1439bdd1243dSDimitry Andric     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, II)
14400b57cec5SDimitry Andric       .addReg(SrcReg1);
14410b57cec5SDimitry Andric 
14420b57cec5SDimitry Andric     // Only add immediate for icmp as the immediate for fcmp is an implicit 0.0.
14430b57cec5SDimitry Andric     if (isICmp)
14440b57cec5SDimitry Andric       MIB.addImm(Imm);
14450b57cec5SDimitry Andric     AddOptionalDefs(MIB);
14460b57cec5SDimitry Andric   }
14470b57cec5SDimitry Andric 
14480b57cec5SDimitry Andric   // For floating point we need to move the result to a comparison register
14490b57cec5SDimitry Andric   // that we can then use for branches.
14500b57cec5SDimitry Andric   if (Ty->isFloatTy() || Ty->isDoubleTy())
1451bdd1243dSDimitry Andric     AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
14520b57cec5SDimitry Andric                             TII.get(ARM::FMSTAT)));
14530b57cec5SDimitry Andric   return true;
14540b57cec5SDimitry Andric }
14550b57cec5SDimitry Andric 
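// Select an IR compare: emit the compare, then materialize its result as 0/1
// in a GPR with a predicated move.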
14560b57cec5SDimitry Andric bool ARMFastISel::SelectCmp(const Instruction *I) {
14570b57cec5SDimitry Andric   const CmpInst *CI = cast<CmpInst>(I);
14580b57cec5SDimitry Andric 
14590b57cec5SDimitry Andric   // Get the compare predicate.
14600b57cec5SDimitry Andric   ARMCC::CondCodes ARMPred = getComparePred(CI->getPredicate());
14610b57cec5SDimitry Andric 
14620b57cec5SDimitry Andric   // We may not handle every CC for now.
14630b57cec5SDimitry Andric   if (ARMPred == ARMCC::AL) return false;
14640b57cec5SDimitry Andric 
14650b57cec5SDimitry Andric   // Emit the compare.
14668bcb0991SDimitry Andric   if (!ARMEmitCmp(CI->getOperand(0), CI->getOperand(1), CI->isUnsigned()))
14670b57cec5SDimitry Andric     return false;
14680b57cec5SDimitry Andric 
14690b57cec5SDimitry Andric   // Now set a register based on the comparison. Explicitly set the predicates
14700b57cec5SDimitry Andric   // here.
14710b57cec5SDimitry Andric   unsigned MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
14720b57cec5SDimitry Andric   const TargetRegisterClass *RC = isThumb2 ? &ARM::rGPRRegClass
14730b57cec5SDimitry Andric                                            : &ARM::GPRRegClass;
147404eeddc0SDimitry Andric   Register DestReg = createResultReg(RC);
14750b57cec5SDimitry Andric   Constant *Zero = ConstantInt::get(Type::getInt32Ty(*Context), 0);
14760b57cec5SDimitry Andric   unsigned ZeroReg = fastMaterializeConstant(Zero);
14770b57cec5SDimitry Andric   // ARMEmitCmp emits an FMSTAT when necessary, so it's always safe to use CPSR.
1478bdd1243dSDimitry Andric   BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc), DestReg)
14790b57cec5SDimitry Andric           .addReg(ZeroReg).addImm(1)
14800b57cec5SDimitry Andric           .addImm(ARMPred).addReg(ARM::CPSR);
14810b57cec5SDimitry Andric 
14820b57cec5SDimitry Andric   updateValueMap(I, DestReg);
14830b57cec5SDimitry Andric   return true;
14840b57cec5SDimitry Andric }
14850b57cec5SDimitry Andric 
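// Select fpext from float to double using VCVTDS.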
14860b57cec5SDimitry Andric bool ARMFastISel::SelectFPExt(const Instruction *I) {
14870b57cec5SDimitry Andric   // Make sure we have VFP and that we're extending float to double.
14880b57cec5SDimitry Andric   if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
14890b57cec5SDimitry Andric 
14900b57cec5SDimitry Andric   Value *V = I->getOperand(0);
14910b57cec5SDimitry Andric   if (!I->getType()->isDoubleTy() ||
14920b57cec5SDimitry Andric       !V->getType()->isFloatTy()) return false;
14930b57cec5SDimitry Andric 
149404eeddc0SDimitry Andric   Register Op = getRegForValue(V);
14950b57cec5SDimitry Andric   if (Op == 0) return false;
14960b57cec5SDimitry Andric 
149704eeddc0SDimitry Andric   Register Result = createResultReg(&ARM::DPRRegClass);
1498bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
14990b57cec5SDimitry Andric                           TII.get(ARM::VCVTDS), Result)
15000b57cec5SDimitry Andric                   .addReg(Op));
15010b57cec5SDimitry Andric   updateValueMap(I, Result);
15020b57cec5SDimitry Andric   return true;
15030b57cec5SDimitry Andric }
15040b57cec5SDimitry Andric 
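// Select fptrunc from double to float using VCVTSD.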
15050b57cec5SDimitry Andric bool ARMFastISel::SelectFPTrunc(const Instruction *I) {
15060b57cec5SDimitry Andric   // Make sure we have VFP and that we're truncating double to float.
15070b57cec5SDimitry Andric   if (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()) return false;
15080b57cec5SDimitry Andric 
15090b57cec5SDimitry Andric   Value *V = I->getOperand(0);
15100b57cec5SDimitry Andric   if (!(I->getType()->isFloatTy() &&
15110b57cec5SDimitry Andric         V->getType()->isDoubleTy())) return false;
15120b57cec5SDimitry Andric 
151304eeddc0SDimitry Andric   Register Op = getRegForValue(V);
15140b57cec5SDimitry Andric   if (Op == 0) return false;
15150b57cec5SDimitry Andric 
151604eeddc0SDimitry Andric   Register Result = createResultReg(&ARM::SPRRegClass);
1517bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
15180b57cec5SDimitry Andric                           TII.get(ARM::VCVTSD), Result)
15190b57cec5SDimitry Andric                   .addReg(Op));
15200b57cec5SDimitry Andric   updateValueMap(I, Result);
15210b57cec5SDimitry Andric   return true;
15220b57cec5SDimitry Andric }
15230b57cec5SDimitry Andric 
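// Select [su]itofp: extend narrow sources, move the integer into an FP
// register, and convert with the VSITO*/VUITO* instructions.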
15240b57cec5SDimitry Andric bool ARMFastISel::SelectIToFP(const Instruction *I, bool isSigned) {
15250b57cec5SDimitry Andric   // Make sure we have VFP.
15260b57cec5SDimitry Andric   if (!Subtarget->hasVFP2Base()) return false;
15270b57cec5SDimitry Andric 
15280b57cec5SDimitry Andric   MVT DstVT;
15290b57cec5SDimitry Andric   Type *Ty = I->getType();
15300b57cec5SDimitry Andric   if (!isTypeLegal(Ty, DstVT))
15310b57cec5SDimitry Andric     return false;
15320b57cec5SDimitry Andric 
15330b57cec5SDimitry Andric   Value *Src = I->getOperand(0);
15340b57cec5SDimitry Andric   EVT SrcEVT = TLI.getValueType(DL, Src->getType(), true);
15350b57cec5SDimitry Andric   if (!SrcEVT.isSimple())
15360b57cec5SDimitry Andric     return false;
15370b57cec5SDimitry Andric   MVT SrcVT = SrcEVT.getSimpleVT();
15380b57cec5SDimitry Andric   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
15390b57cec5SDimitry Andric     return false;
15400b57cec5SDimitry Andric 
154104eeddc0SDimitry Andric   Register SrcReg = getRegForValue(Src);
15420b57cec5SDimitry Andric   if (SrcReg == 0) return false;
15430b57cec5SDimitry Andric 
15440b57cec5SDimitry Andric   // Handle sign- or zero-extension.
15450b57cec5SDimitry Andric   if (SrcVT == MVT::i16 || SrcVT == MVT::i8) {
15460b57cec5SDimitry Andric     SrcReg = ARMEmitIntExt(SrcVT, SrcReg, MVT::i32,
15470b57cec5SDimitry Andric                                        /*isZExt*/!isSigned);
15480b57cec5SDimitry Andric     if (SrcReg == 0) return false;
15490b57cec5SDimitry Andric   }
15500b57cec5SDimitry Andric 
15510b57cec5SDimitry Andric   // The conversion routine works on fp-reg to fp-reg and the operand above
15520b57cec5SDimitry Andric   // was an integer; move it to the fp registers if possible.
15530b57cec5SDimitry Andric   unsigned FP = ARMMoveToFPReg(MVT::f32, SrcReg);
15540b57cec5SDimitry Andric   if (FP == 0) return false;
15550b57cec5SDimitry Andric 
15560b57cec5SDimitry Andric   unsigned Opc;
15570b57cec5SDimitry Andric   if (Ty->isFloatTy()) Opc = isSigned ? ARM::VSITOS : ARM::VUITOS;
15580b57cec5SDimitry Andric   else if (Ty->isDoubleTy() && Subtarget->hasFP64())
15590b57cec5SDimitry Andric     Opc = isSigned ? ARM::VSITOD : ARM::VUITOD;
15600b57cec5SDimitry Andric   else return false;
15610b57cec5SDimitry Andric 
156204eeddc0SDimitry Andric   Register ResultReg = createResultReg(TLI.getRegClassFor(DstVT));
1563bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
15640b57cec5SDimitry Andric                           TII.get(Opc), ResultReg).addReg(FP));
15650b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
15660b57cec5SDimitry Andric   return true;
15670b57cec5SDimitry Andric }
15680b57cec5SDimitry Andric 
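// Select fpto[su]i: convert within the FP registers, then move the result
// into an integer register.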
15690b57cec5SDimitry Andric bool ARMFastISel::SelectFPToI(const Instruction *I, bool isSigned) {
15700b57cec5SDimitry Andric   // Make sure we have VFP.
15710b57cec5SDimitry Andric   if (!Subtarget->hasVFP2Base()) return false;
15720b57cec5SDimitry Andric 
15730b57cec5SDimitry Andric   MVT DstVT;
15740b57cec5SDimitry Andric   Type *RetTy = I->getType();
15750b57cec5SDimitry Andric   if (!isTypeLegal(RetTy, DstVT))
15760b57cec5SDimitry Andric     return false;
15770b57cec5SDimitry Andric 
157804eeddc0SDimitry Andric   Register Op = getRegForValue(I->getOperand(0));
15790b57cec5SDimitry Andric   if (Op == 0) return false;
15800b57cec5SDimitry Andric 
15810b57cec5SDimitry Andric   unsigned Opc;
15820b57cec5SDimitry Andric   Type *OpTy = I->getOperand(0)->getType();
15830b57cec5SDimitry Andric   if (OpTy->isFloatTy()) Opc = isSigned ? ARM::VTOSIZS : ARM::VTOUIZS;
15840b57cec5SDimitry Andric   else if (OpTy->isDoubleTy() && Subtarget->hasFP64())
15850b57cec5SDimitry Andric     Opc = isSigned ? ARM::VTOSIZD : ARM::VTOUIZD;
15860b57cec5SDimitry Andric   else return false;
15870b57cec5SDimitry Andric 
15880b57cec5SDimitry Andric   // f64->s32/u32 or f32->s32/u32 both need an intermediate f32 reg.
158904eeddc0SDimitry Andric   Register ResultReg = createResultReg(TLI.getRegClassFor(MVT::f32));
1590bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
15910b57cec5SDimitry Andric                           TII.get(Opc), ResultReg).addReg(Op));
15920b57cec5SDimitry Andric 
15930b57cec5SDimitry Andric   // This result needs to be in an integer register, but the conversion only
15940b57cec5SDimitry Andric   // takes place in fp-regs.
15950b57cec5SDimitry Andric   unsigned IntReg = ARMMoveToIntReg(DstVT, ResultReg);
15960b57cec5SDimitry Andric   if (IntReg == 0) return false;
15970b57cec5SDimitry Andric 
15980b57cec5SDimitry Andric   updateValueMap(I, IntReg);
15990b57cec5SDimitry Andric   return true;
16000b57cec5SDimitry Andric }
16010b57cec5SDimitry Andric 
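// Select an i32 select as a TST of the condition followed by a predicated
// move (register or immediate form).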
16020b57cec5SDimitry Andric bool ARMFastISel::SelectSelect(const Instruction *I) {
16030b57cec5SDimitry Andric   MVT VT;
16040b57cec5SDimitry Andric   if (!isTypeLegal(I->getType(), VT))
16050b57cec5SDimitry Andric     return false;
16060b57cec5SDimitry Andric 
16070b57cec5SDimitry Andric   // Things need to be register sized for register moves.
16080b57cec5SDimitry Andric   if (VT != MVT::i32) return false;
16090b57cec5SDimitry Andric 
161004eeddc0SDimitry Andric   Register CondReg = getRegForValue(I->getOperand(0));
16110b57cec5SDimitry Andric   if (CondReg == 0) return false;
161204eeddc0SDimitry Andric   Register Op1Reg = getRegForValue(I->getOperand(1));
16130b57cec5SDimitry Andric   if (Op1Reg == 0) return false;
16140b57cec5SDimitry Andric 
16150b57cec5SDimitry Andric   // Check to see if we can use an immediate in the conditional move.
16160b57cec5SDimitry Andric   int Imm = 0;
16170b57cec5SDimitry Andric   bool UseImm = false;
16180b57cec5SDimitry Andric   bool isNegativeImm = false;
16190b57cec5SDimitry Andric   if (const ConstantInt *ConstInt = dyn_cast<ConstantInt>(I->getOperand(2))) {
16200b57cec5SDimitry Andric     assert(VT == MVT::i32 && "Expecting an i32.");
16210b57cec5SDimitry Andric     Imm = (int)ConstInt->getValue().getZExtValue();
16220b57cec5SDimitry Andric     if (Imm < 0) {
16230b57cec5SDimitry Andric       isNegativeImm = true;
16240b57cec5SDimitry Andric       Imm = ~Imm;
16250b57cec5SDimitry Andric     }
16260b57cec5SDimitry Andric     UseImm = isThumb2 ? (ARM_AM::getT2SOImmVal(Imm) != -1) :
16270b57cec5SDimitry Andric       (ARM_AM::getSOImmVal(Imm) != -1);
16280b57cec5SDimitry Andric   }
16290b57cec5SDimitry Andric 
16300b57cec5SDimitry Andric   unsigned Op2Reg = 0;
16310b57cec5SDimitry Andric   if (!UseImm) {
16320b57cec5SDimitry Andric     Op2Reg = getRegForValue(I->getOperand(2));
16330b57cec5SDimitry Andric     if (Op2Reg == 0) return false;
16340b57cec5SDimitry Andric   }
16350b57cec5SDimitry Andric 
16360b57cec5SDimitry Andric   unsigned TstOpc = isThumb2 ? ARM::t2TSTri : ARM::TSTri;
16370b57cec5SDimitry Andric   CondReg = constrainOperandRegClass(TII.get(TstOpc), CondReg, 0);
16380b57cec5SDimitry Andric   AddOptionalDefs(
1639bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(TstOpc))
16400b57cec5SDimitry Andric           .addReg(CondReg)
16410b57cec5SDimitry Andric           .addImm(1));
16420b57cec5SDimitry Andric 
16430b57cec5SDimitry Andric   unsigned MovCCOpc;
16440b57cec5SDimitry Andric   const TargetRegisterClass *RC;
16450b57cec5SDimitry Andric   if (!UseImm) {
16460b57cec5SDimitry Andric     RC = isThumb2 ? &ARM::tGPRRegClass : &ARM::GPRRegClass;
16470b57cec5SDimitry Andric     MovCCOpc = isThumb2 ? ARM::t2MOVCCr : ARM::MOVCCr;
16480b57cec5SDimitry Andric   } else {
16490b57cec5SDimitry Andric     RC = isThumb2 ? &ARM::rGPRRegClass : &ARM::GPRRegClass;
16500b57cec5SDimitry Andric     if (!isNegativeImm)
16510b57cec5SDimitry Andric       MovCCOpc = isThumb2 ? ARM::t2MOVCCi : ARM::MOVCCi;
16520b57cec5SDimitry Andric     else
16530b57cec5SDimitry Andric       MovCCOpc = isThumb2 ? ARM::t2MVNCCi : ARM::MVNCCi;
16540b57cec5SDimitry Andric   }
165504eeddc0SDimitry Andric   Register ResultReg = createResultReg(RC);
16560b57cec5SDimitry Andric   if (!UseImm) {
16570b57cec5SDimitry Andric     Op2Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op2Reg, 1);
16580b57cec5SDimitry Andric     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 2);
1659bdd1243dSDimitry Andric     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
16600b57cec5SDimitry Andric             ResultReg)
16610b57cec5SDimitry Andric         .addReg(Op2Reg)
16620b57cec5SDimitry Andric         .addReg(Op1Reg)
16630b57cec5SDimitry Andric         .addImm(ARMCC::NE)
16640b57cec5SDimitry Andric         .addReg(ARM::CPSR);
16650b57cec5SDimitry Andric   } else {
16660b57cec5SDimitry Andric     Op1Reg = constrainOperandRegClass(TII.get(MovCCOpc), Op1Reg, 1);
1667bdd1243dSDimitry Andric     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(MovCCOpc),
16680b57cec5SDimitry Andric             ResultReg)
16690b57cec5SDimitry Andric         .addReg(Op1Reg)
16700b57cec5SDimitry Andric         .addImm(Imm)
16710b57cec5SDimitry Andric         .addImm(ARMCC::EQ)
16720b57cec5SDimitry Andric         .addReg(ARM::CPSR);
16730b57cec5SDimitry Andric   }
16740b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
16750b57cec5SDimitry Andric   return true;
16760b57cec5SDimitry Andric }
16770b57cec5SDimitry Andric 
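// Select integer division with a runtime library call; if the subtarget has
// hardware divide, bail and let the normal selection path handle it.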
16780b57cec5SDimitry Andric bool ARMFastISel::SelectDiv(const Instruction *I, bool isSigned) {
16790b57cec5SDimitry Andric   MVT VT;
16800b57cec5SDimitry Andric   Type *Ty = I->getType();
16810b57cec5SDimitry Andric   if (!isTypeLegal(Ty, VT))
16820b57cec5SDimitry Andric     return false;
16830b57cec5SDimitry Andric 
16840b57cec5SDimitry Andric   // If we have integer div support we should have selected this automagically.
16850b57cec5SDimitry Andric   // In case we have a real miss, go ahead and return false and we'll pick
16860b57cec5SDimitry Andric   // it up later.
16870b57cec5SDimitry Andric   if (Subtarget->hasDivideInThumbMode())
16880b57cec5SDimitry Andric     return false;
16890b57cec5SDimitry Andric 
16900b57cec5SDimitry Andric   // Otherwise emit a libcall.
16910b57cec5SDimitry Andric   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
16920b57cec5SDimitry Andric   if (VT == MVT::i8)
16930b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SDIV_I8 : RTLIB::UDIV_I8;
16940b57cec5SDimitry Andric   else if (VT == MVT::i16)
16950b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SDIV_I16 : RTLIB::UDIV_I16;
16960b57cec5SDimitry Andric   else if (VT == MVT::i32)
16970b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SDIV_I32 : RTLIB::UDIV_I32;
16980b57cec5SDimitry Andric   else if (VT == MVT::i64)
16990b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SDIV_I64 : RTLIB::UDIV_I64;
17000b57cec5SDimitry Andric   else if (VT == MVT::i128)
17010b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SDIV_I128 : RTLIB::UDIV_I128;
17020b57cec5SDimitry Andric   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SDIV!");
17030b57cec5SDimitry Andric 
17040b57cec5SDimitry Andric   return ARMEmitLibcall(I, LC);
17050b57cec5SDimitry Andric }
17060b57cec5SDimitry Andric 
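// Select integer remainder with a runtime library call, but only when the
// target provides a standalone rem libcall (otherwise divrem would be needed).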
17070b57cec5SDimitry Andric bool ARMFastISel::SelectRem(const Instruction *I, bool isSigned) {
17080b57cec5SDimitry Andric   MVT VT;
17090b57cec5SDimitry Andric   Type *Ty = I->getType();
17100b57cec5SDimitry Andric   if (!isTypeLegal(Ty, VT))
17110b57cec5SDimitry Andric     return false;
17120b57cec5SDimitry Andric 
17130b57cec5SDimitry Andric   // Many ABIs do not provide a libcall for standalone remainder, so we need to
17140b57cec5SDimitry Andric   // use divrem (see the RTABI 4.3.1). Since FastISel can't handle non-double
17150b57cec5SDimitry Andric   // multi-reg returns, we'll have to bail out.
17160b57cec5SDimitry Andric   if (!TLI.hasStandaloneRem(VT)) {
17170b57cec5SDimitry Andric     return false;
17180b57cec5SDimitry Andric   }
17190b57cec5SDimitry Andric 
17200b57cec5SDimitry Andric   RTLIB::Libcall LC = RTLIB::UNKNOWN_LIBCALL;
17210b57cec5SDimitry Andric   if (VT == MVT::i8)
17220b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SREM_I8 : RTLIB::UREM_I8;
17230b57cec5SDimitry Andric   else if (VT == MVT::i16)
17240b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SREM_I16 : RTLIB::UREM_I16;
17250b57cec5SDimitry Andric   else if (VT == MVT::i32)
17260b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SREM_I32 : RTLIB::UREM_I32;
17270b57cec5SDimitry Andric   else if (VT == MVT::i64)
17280b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SREM_I64 : RTLIB::UREM_I64;
17290b57cec5SDimitry Andric   else if (VT == MVT::i128)
17300b57cec5SDimitry Andric     LC = isSigned ? RTLIB::SREM_I128 : RTLIB::UREM_I128;
17310b57cec5SDimitry Andric   assert(LC != RTLIB::UNKNOWN_LIBCALL && "Unsupported SREM!");
17320b57cec5SDimitry Andric 
17330b57cec5SDimitry Andric   return ARMEmitLibcall(I, LC);
17340b57cec5SDimitry Andric }
17350b57cec5SDimitry Andric 
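// Select add/or/sub on types narrower than i32 that the target-independent
// selector could not handle.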
17360b57cec5SDimitry Andric bool ARMFastISel::SelectBinaryIntOp(const Instruction *I, unsigned ISDOpcode) {
17370b57cec5SDimitry Andric   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
17380b57cec5SDimitry Andric 
17390b57cec5SDimitry Andric   // We can get here in the case when we have a binary operation on a non-legal
17400b57cec5SDimitry Andric   // type and the target independent selector doesn't know how to handle it.
17410b57cec5SDimitry Andric   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
17420b57cec5SDimitry Andric     return false;
17430b57cec5SDimitry Andric 
17440b57cec5SDimitry Andric   unsigned Opc;
17450b57cec5SDimitry Andric   switch (ISDOpcode) {
17460b57cec5SDimitry Andric     default: return false;
17470b57cec5SDimitry Andric     case ISD::ADD:
17480b57cec5SDimitry Andric       Opc = isThumb2 ? ARM::t2ADDrr : ARM::ADDrr;
17490b57cec5SDimitry Andric       break;
17500b57cec5SDimitry Andric     case ISD::OR:
17510b57cec5SDimitry Andric       Opc = isThumb2 ? ARM::t2ORRrr : ARM::ORRrr;
17520b57cec5SDimitry Andric       break;
17530b57cec5SDimitry Andric     case ISD::SUB:
17540b57cec5SDimitry Andric       Opc = isThumb2 ? ARM::t2SUBrr : ARM::SUBrr;
17550b57cec5SDimitry Andric       break;
17560b57cec5SDimitry Andric   }
17570b57cec5SDimitry Andric 
175804eeddc0SDimitry Andric   Register SrcReg1 = getRegForValue(I->getOperand(0));
17590b57cec5SDimitry Andric   if (SrcReg1 == 0) return false;
17600b57cec5SDimitry Andric 
17610b57cec5SDimitry Andric   // TODO: Often the 2nd operand is an immediate, which can be encoded directly
17620b57cec5SDimitry Andric   // in the instruction, rather than materializing the value in a register.
176304eeddc0SDimitry Andric   Register SrcReg2 = getRegForValue(I->getOperand(1));
17640b57cec5SDimitry Andric   if (SrcReg2 == 0) return false;
17650b57cec5SDimitry Andric 
176604eeddc0SDimitry Andric   Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
17670b57cec5SDimitry Andric   SrcReg1 = constrainOperandRegClass(TII.get(Opc), SrcReg1, 1);
17680b57cec5SDimitry Andric   SrcReg2 = constrainOperandRegClass(TII.get(Opc), SrcReg2, 2);
1769bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
17700b57cec5SDimitry Andric                           TII.get(Opc), ResultReg)
17710b57cec5SDimitry Andric                   .addReg(SrcReg1).addReg(SrcReg2));
17720b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
17730b57cec5SDimitry Andric   return true;
17740b57cec5SDimitry Andric }
17750b57cec5SDimitry Andric 
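// Select scalar FP add/sub/mul using VFP instructions.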
17760b57cec5SDimitry Andric bool ARMFastISel::SelectBinaryFPOp(const Instruction *I, unsigned ISDOpcode) {
17770b57cec5SDimitry Andric   EVT FPVT = TLI.getValueType(DL, I->getType(), true);
17780b57cec5SDimitry Andric   if (!FPVT.isSimple()) return false;
17790b57cec5SDimitry Andric   MVT VT = FPVT.getSimpleVT();
17800b57cec5SDimitry Andric 
17810b57cec5SDimitry Andric   // FIXME: Support vector types where possible.
17820b57cec5SDimitry Andric   if (VT.isVector())
17830b57cec5SDimitry Andric     return false;
17840b57cec5SDimitry Andric 
17850b57cec5SDimitry Andric   // We can get here in the case when we want to use NEON for our fp
17860b57cec5SDimitry Andric   // operations, but can't figure out how to. Just use the vfp instructions
17870b57cec5SDimitry Andric   // if we have them.
17880b57cec5SDimitry Andric   // FIXME: It'd be nice to use NEON instructions.
17890b57cec5SDimitry Andric   Type *Ty = I->getType();
17900b57cec5SDimitry Andric   if (Ty->isFloatTy() && !Subtarget->hasVFP2Base())
17910b57cec5SDimitry Andric     return false;
17920b57cec5SDimitry Andric   if (Ty->isDoubleTy() && (!Subtarget->hasVFP2Base() || !Subtarget->hasFP64()))
17930b57cec5SDimitry Andric     return false;
17940b57cec5SDimitry Andric 
17950b57cec5SDimitry Andric   unsigned Opc;
17960b57cec5SDimitry Andric   bool is64bit = VT == MVT::f64 || VT == MVT::i64;
17970b57cec5SDimitry Andric   switch (ISDOpcode) {
17980b57cec5SDimitry Andric     default: return false;
17990b57cec5SDimitry Andric     case ISD::FADD:
18000b57cec5SDimitry Andric       Opc = is64bit ? ARM::VADDD : ARM::VADDS;
18010b57cec5SDimitry Andric       break;
18020b57cec5SDimitry Andric     case ISD::FSUB:
18030b57cec5SDimitry Andric       Opc = is64bit ? ARM::VSUBD : ARM::VSUBS;
18040b57cec5SDimitry Andric       break;
18050b57cec5SDimitry Andric     case ISD::FMUL:
18060b57cec5SDimitry Andric       Opc = is64bit ? ARM::VMULD : ARM::VMULS;
18070b57cec5SDimitry Andric       break;
18080b57cec5SDimitry Andric   }
180904eeddc0SDimitry Andric   Register Op1 = getRegForValue(I->getOperand(0));
18100b57cec5SDimitry Andric   if (Op1 == 0) return false;
18110b57cec5SDimitry Andric 
181204eeddc0SDimitry Andric   Register Op2 = getRegForValue(I->getOperand(1));
18130b57cec5SDimitry Andric   if (Op2 == 0) return false;
18140b57cec5SDimitry Andric 
181504eeddc0SDimitry Andric   Register ResultReg = createResultReg(TLI.getRegClassFor(VT.SimpleTy));
1816bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
18170b57cec5SDimitry Andric                           TII.get(Opc), ResultReg)
18180b57cec5SDimitry Andric                   .addReg(Op1).addReg(Op2));
18190b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
18200b57cec5SDimitry Andric   return true;
18210b57cec5SDimitry Andric }
18220b57cec5SDimitry Andric 
18230b57cec5SDimitry Andric // Call Handling Code
18240b57cec5SDimitry Andric 
18250b57cec5SDimitry Andric // This is largely taken directly from CCAssignFnForNode
18260b57cec5SDimitry Andric // TODO: We may not support all of this.
18270b57cec5SDimitry Andric CCAssignFn *ARMFastISel::CCAssignFnForCall(CallingConv::ID CC,
18280b57cec5SDimitry Andric                                            bool Return,
18290b57cec5SDimitry Andric                                            bool isVarArg) {
18300b57cec5SDimitry Andric   switch (CC) {
18310b57cec5SDimitry Andric   default:
18320b57cec5SDimitry Andric     report_fatal_error("Unsupported calling convention");
18330b57cec5SDimitry Andric   case CallingConv::Fast:
18340b57cec5SDimitry Andric     if (Subtarget->hasVFP2Base() && !isVarArg) {
18350b57cec5SDimitry Andric       if (!Subtarget->isAAPCS_ABI())
18360b57cec5SDimitry Andric         return (Return ? RetFastCC_ARM_APCS : FastCC_ARM_APCS);
18370b57cec5SDimitry Andric       // For AAPCS ABI targets, just use the VFP variant of the calling convention.
18380b57cec5SDimitry Andric       return (Return ? RetCC_ARM_AAPCS_VFP : CC_ARM_AAPCS_VFP);
18390b57cec5SDimitry Andric     }
1840bdd1243dSDimitry Andric     [[fallthrough]];
18410b57cec5SDimitry Andric   case CallingConv::C:
18420b57cec5SDimitry Andric   case CallingConv::CXX_FAST_TLS:
18430b57cec5SDimitry Andric     // Use target triple & subtarget features to do actual dispatch.
18440b57cec5SDimitry Andric     if (Subtarget->isAAPCS_ABI()) {
184506c3fb27SDimitry Andric       if (Subtarget->hasFPRegs() &&
18460b57cec5SDimitry Andric           TM.Options.FloatABIType == FloatABI::Hard && !isVarArg)
18470b57cec5SDimitry Andric         return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
18480b57cec5SDimitry Andric       else
18490b57cec5SDimitry Andric         return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
18500b57cec5SDimitry Andric     } else {
18510b57cec5SDimitry Andric       return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
18520b57cec5SDimitry Andric     }
18530b57cec5SDimitry Andric   case CallingConv::ARM_AAPCS_VFP:
18540b57cec5SDimitry Andric   case CallingConv::Swift:
1855fe6060f1SDimitry Andric   case CallingConv::SwiftTail:
18560b57cec5SDimitry Andric     if (!isVarArg)
18570b57cec5SDimitry Andric       return (Return ? RetCC_ARM_AAPCS_VFP: CC_ARM_AAPCS_VFP);
18580b57cec5SDimitry Andric     // Fall through to the soft-float variant; variadic functions don't
18590b57cec5SDimitry Andric     // use the hard floating-point ABI.
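    // (Example, illustrative: a variadic call on a hard-float AAPCS target
    // still passes its double arguments in core registers or on the stack per
    // base AAPCS rather than in d0-d7.)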
1860bdd1243dSDimitry Andric     [[fallthrough]];
18610b57cec5SDimitry Andric   case CallingConv::ARM_AAPCS:
18620b57cec5SDimitry Andric     return (Return ? RetCC_ARM_AAPCS: CC_ARM_AAPCS);
18630b57cec5SDimitry Andric   case CallingConv::ARM_APCS:
18640b57cec5SDimitry Andric     return (Return ? RetCC_ARM_APCS: CC_ARM_APCS);
18650b57cec5SDimitry Andric   case CallingConv::GHC:
18660b57cec5SDimitry Andric     if (Return)
18670b57cec5SDimitry Andric       report_fatal_error("Can't return in GHC call convention");
18680b57cec5SDimitry Andric     else
18690b57cec5SDimitry Andric       return CC_ARM_APCS_GHC;
1870480093f4SDimitry Andric   case CallingConv::CFGuard_Check:
1871480093f4SDimitry Andric     return (Return ? RetCC_ARM_AAPCS : CC_ARM_Win32_CFGuard_Check);
18720b57cec5SDimitry Andric   }
18730b57cec5SDimitry Andric }
18740b57cec5SDimitry Andric 
18750b57cec5SDimitry Andric bool ARMFastISel::ProcessCallArgs(SmallVectorImpl<Value*> &Args,
18768bcb0991SDimitry Andric                                   SmallVectorImpl<Register> &ArgRegs,
18770b57cec5SDimitry Andric                                   SmallVectorImpl<MVT> &ArgVTs,
18780b57cec5SDimitry Andric                                   SmallVectorImpl<ISD::ArgFlagsTy> &ArgFlags,
18798bcb0991SDimitry Andric                                   SmallVectorImpl<Register> &RegArgs,
18800b57cec5SDimitry Andric                                   CallingConv::ID CC,
18810b57cec5SDimitry Andric                                   unsigned &NumBytes,
18820b57cec5SDimitry Andric                                   bool isVarArg) {
18830b57cec5SDimitry Andric   SmallVector<CCValAssign, 16> ArgLocs;
18840b57cec5SDimitry Andric   CCState CCInfo(CC, isVarArg, *FuncInfo.MF, ArgLocs, *Context);
18850b57cec5SDimitry Andric   CCInfo.AnalyzeCallOperands(ArgVTs, ArgFlags,
18860b57cec5SDimitry Andric                              CCAssignFnForCall(CC, false, isVarArg));
18870b57cec5SDimitry Andric 
18880b57cec5SDimitry Andric   // Check that we can handle all of the arguments. If we can't, then bail out
18890b57cec5SDimitry Andric   // now before we add code to the MBB.
18900b57cec5SDimitry Andric   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
18910b57cec5SDimitry Andric     CCValAssign &VA = ArgLocs[i];
18920b57cec5SDimitry Andric     MVT ArgVT = ArgVTs[VA.getValNo()];
18930b57cec5SDimitry Andric 
18940b57cec5SDimitry Andric     // We don't handle NEON/vector parameters yet.
18950b57cec5SDimitry Andric     if (ArgVT.isVector() || ArgVT.getSizeInBits() > 64)
18960b57cec5SDimitry Andric       return false;
18970b57cec5SDimitry Andric 
18980b57cec5SDimitry Andric     // Now copy/store arg to correct locations.
18990b57cec5SDimitry Andric     if (VA.isRegLoc() && !VA.needsCustom()) {
19000b57cec5SDimitry Andric       continue;
19010b57cec5SDimitry Andric     } else if (VA.needsCustom()) {
19020b57cec5SDimitry Andric       // TODO: We need custom lowering for vector (v2f64) args.
19030b57cec5SDimitry Andric       if (VA.getLocVT() != MVT::f64 ||
19040b57cec5SDimitry Andric           // TODO: Only handle register args for now.
19050b57cec5SDimitry Andric           !VA.isRegLoc() || !ArgLocs[++i].isRegLoc())
19060b57cec5SDimitry Andric         return false;
19070b57cec5SDimitry Andric     } else {
19080b57cec5SDimitry Andric       switch (ArgVT.SimpleTy) {
19090b57cec5SDimitry Andric       default:
19100b57cec5SDimitry Andric         return false;
19110b57cec5SDimitry Andric       case MVT::i1:
19120b57cec5SDimitry Andric       case MVT::i8:
19130b57cec5SDimitry Andric       case MVT::i16:
19140b57cec5SDimitry Andric       case MVT::i32:
19150b57cec5SDimitry Andric         break;
19160b57cec5SDimitry Andric       case MVT::f32:
19170b57cec5SDimitry Andric         if (!Subtarget->hasVFP2Base())
19180b57cec5SDimitry Andric           return false;
19190b57cec5SDimitry Andric         break;
19200b57cec5SDimitry Andric       case MVT::f64:
19210b57cec5SDimitry Andric         if (!Subtarget->hasVFP2Base())
19220b57cec5SDimitry Andric           return false;
19230b57cec5SDimitry Andric         break;
19240b57cec5SDimitry Andric       }
19250b57cec5SDimitry Andric     }
19260b57cec5SDimitry Andric   }
19270b57cec5SDimitry Andric 
19280b57cec5SDimitry Andric   // At this point, we are able to handle the call's arguments in fast isel.
19290b57cec5SDimitry Andric 
19300b57cec5SDimitry Andric   // Get a count of how many bytes are to be pushed on the stack.
193106c3fb27SDimitry Andric   NumBytes = CCInfo.getStackSize();
19320b57cec5SDimitry Andric 
19330b57cec5SDimitry Andric   // Issue CALLSEQ_START
19340b57cec5SDimitry Andric   unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
1935bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
19360b57cec5SDimitry Andric                           TII.get(AdjStackDown))
19370b57cec5SDimitry Andric                   .addImm(NumBytes).addImm(0));
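  // (Sketch: ADJCALLSTACKDOWN/ADJCALLSTACKUP are pseudo-instructions; frame
  // lowering later rewrites them into actual SP adjustments, roughly
  // "sub sp, sp, #NumBytes" here when stack space is needed for outgoing
  // arguments.)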
19380b57cec5SDimitry Andric 
19390b57cec5SDimitry Andric   // Process the args.
19400b57cec5SDimitry Andric   for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
19410b57cec5SDimitry Andric     CCValAssign &VA = ArgLocs[i];
19420b57cec5SDimitry Andric     const Value *ArgVal = Args[VA.getValNo()];
19438bcb0991SDimitry Andric     Register Arg = ArgRegs[VA.getValNo()];
19440b57cec5SDimitry Andric     MVT ArgVT = ArgVTs[VA.getValNo()];
19450b57cec5SDimitry Andric 
19460b57cec5SDimitry Andric     assert((!ArgVT.isVector() && ArgVT.getSizeInBits() <= 64) &&
19470b57cec5SDimitry Andric            "We don't handle NEON/vector parameters yet.");
19480b57cec5SDimitry Andric 
19490b57cec5SDimitry Andric     // Handle arg promotion, etc.
19500b57cec5SDimitry Andric     switch (VA.getLocInfo()) {
19510b57cec5SDimitry Andric       case CCValAssign::Full: break;
19520b57cec5SDimitry Andric       case CCValAssign::SExt: {
19530b57cec5SDimitry Andric         MVT DestVT = VA.getLocVT();
19540b57cec5SDimitry Andric         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/false);
19550b57cec5SDimitry Andric         assert(Arg != 0 && "Failed to emit a sext");
19560b57cec5SDimitry Andric         ArgVT = DestVT;
19570b57cec5SDimitry Andric         break;
19580b57cec5SDimitry Andric       }
19590b57cec5SDimitry Andric       case CCValAssign::AExt:
19600b57cec5SDimitry Andric       // Intentional fall-through.  Handle AExt and ZExt.
19610b57cec5SDimitry Andric       case CCValAssign::ZExt: {
19620b57cec5SDimitry Andric         MVT DestVT = VA.getLocVT();
19630b57cec5SDimitry Andric         Arg = ARMEmitIntExt(ArgVT, Arg, DestVT, /*isZExt*/true);
19640b57cec5SDimitry Andric         assert(Arg != 0 && "Failed to emit a zext");
19650b57cec5SDimitry Andric         ArgVT = DestVT;
19660b57cec5SDimitry Andric         break;
19670b57cec5SDimitry Andric       }
19680b57cec5SDimitry Andric       case CCValAssign::BCvt: {
1969fe6060f1SDimitry Andric         unsigned BC = fastEmit_r(ArgVT, VA.getLocVT(), ISD::BITCAST, Arg);
19700b57cec5SDimitry Andric         assert(BC != 0 && "Failed to emit a bitcast!");
19710b57cec5SDimitry Andric         Arg = BC;
19720b57cec5SDimitry Andric         ArgVT = VA.getLocVT();
19730b57cec5SDimitry Andric         break;
19740b57cec5SDimitry Andric       }
19750b57cec5SDimitry Andric       default: llvm_unreachable("Unknown arg promotion!");
19760b57cec5SDimitry Andric     }
19770b57cec5SDimitry Andric 
19780b57cec5SDimitry Andric     // Now copy/store arg to correct locations.
19790b57cec5SDimitry Andric     if (VA.isRegLoc() && !VA.needsCustom()) {
1980bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
19810b57cec5SDimitry Andric               TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(Arg);
19820b57cec5SDimitry Andric       RegArgs.push_back(VA.getLocReg());
19830b57cec5SDimitry Andric     } else if (VA.needsCustom()) {
19840b57cec5SDimitry Andric       // TODO: We need custom lowering for vector (v2f64) args.
19850b57cec5SDimitry Andric       assert(VA.getLocVT() == MVT::f64 &&
19860b57cec5SDimitry Andric              "Custom lowering for v2f64 args not available");
19870b57cec5SDimitry Andric 
19880b57cec5SDimitry Andric       // FIXME: ArgLocs[++i] may extend beyond ArgLocs.size()
19890b57cec5SDimitry Andric       CCValAssign &NextVA = ArgLocs[++i];
19900b57cec5SDimitry Andric 
19910b57cec5SDimitry Andric       assert(VA.isRegLoc() && NextVA.isRegLoc() &&
19920b57cec5SDimitry Andric              "We only handle register args!");
19930b57cec5SDimitry Andric 
1994bdd1243dSDimitry Andric       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
19950b57cec5SDimitry Andric                               TII.get(ARM::VMOVRRD), VA.getLocReg())
19960b57cec5SDimitry Andric                       .addReg(NextVA.getLocReg(), RegState::Define)
19970b57cec5SDimitry Andric                       .addReg(Arg));
19980b57cec5SDimitry Andric       RegArgs.push_back(VA.getLocReg());
19990b57cec5SDimitry Andric       RegArgs.push_back(NextVA.getLocReg());
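      // (Illustrative: VMOVRRD splits the 64-bit VFP value into a pair of core
      // registers, e.g. d0 -> r0,r1, which is how an f64 argument travels when
      // it is assigned to GPRs by the calling convention.)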
20000b57cec5SDimitry Andric     } else {
20010b57cec5SDimitry Andric       assert(VA.isMemLoc());
20020b57cec5SDimitry Andric       // Need to store on the stack.
20030b57cec5SDimitry Andric 
20040b57cec5SDimitry Andric       // Don't emit stores for undef values.
20050b57cec5SDimitry Andric       if (isa<UndefValue>(ArgVal))
20060b57cec5SDimitry Andric         continue;
20070b57cec5SDimitry Andric 
20080b57cec5SDimitry Andric       Address Addr;
20090b57cec5SDimitry Andric       Addr.BaseType = Address::RegBase;
20100b57cec5SDimitry Andric       Addr.Base.Reg = ARM::SP;
20110b57cec5SDimitry Andric       Addr.Offset = VA.getLocMemOffset();
20120b57cec5SDimitry Andric 
20130b57cec5SDimitry Andric       bool EmitRet = ARMEmitStore(ArgVT, Arg, Addr); (void)EmitRet;
20140b57cec5SDimitry Andric       assert(EmitRet && "Could not emit a store for argument!");
20150b57cec5SDimitry Andric     }
20160b57cec5SDimitry Andric   }
20170b57cec5SDimitry Andric 
20180b57cec5SDimitry Andric   return true;
20190b57cec5SDimitry Andric }
20200b57cec5SDimitry Andric 
20218bcb0991SDimitry Andric bool ARMFastISel::FinishCall(MVT RetVT, SmallVectorImpl<Register> &UsedRegs,
20220b57cec5SDimitry Andric                              const Instruction *I, CallingConv::ID CC,
20230b57cec5SDimitry Andric                              unsigned &NumBytes, bool isVarArg) {
20240b57cec5SDimitry Andric   // Issue CALLSEQ_END
20250b57cec5SDimitry Andric   unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
2026bdd1243dSDimitry Andric   AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
20270b57cec5SDimitry Andric                           TII.get(AdjStackUp))
202804eeddc0SDimitry Andric                   .addImm(NumBytes).addImm(-1ULL));
20290b57cec5SDimitry Andric 
20300b57cec5SDimitry Andric   // Now the return value.
20310b57cec5SDimitry Andric   if (RetVT != MVT::isVoid) {
20320b57cec5SDimitry Andric     SmallVector<CCValAssign, 16> RVLocs;
20330b57cec5SDimitry Andric     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
20340b57cec5SDimitry Andric     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
20350b57cec5SDimitry Andric 
20360b57cec5SDimitry Andric     // Copy all of the result registers out of their specified physreg.
20370b57cec5SDimitry Andric     if (RVLocs.size() == 2 && RetVT == MVT::f64) {
20380b57cec5SDimitry Andric       // For this move we copy into two registers and then move into the
20390b57cec5SDimitry Andric       // double fp reg we want.
20400b57cec5SDimitry Andric       MVT DestVT = RVLocs[0].getValVT();
20410b57cec5SDimitry Andric       const TargetRegisterClass* DstRC = TLI.getRegClassFor(DestVT);
20428bcb0991SDimitry Andric       Register ResultReg = createResultReg(DstRC);
2043bdd1243dSDimitry Andric       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
20440b57cec5SDimitry Andric                               TII.get(ARM::VMOVDRR), ResultReg)
20450b57cec5SDimitry Andric                       .addReg(RVLocs[0].getLocReg())
20460b57cec5SDimitry Andric                       .addReg(RVLocs[1].getLocReg()));
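      // (Illustrative: the inverse of the argument case; VMOVDRR reassembles
      // the two returned core registers, e.g. r0,r1, into a single D register
      // holding the f64 result.)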
20470b57cec5SDimitry Andric 
20480b57cec5SDimitry Andric       UsedRegs.push_back(RVLocs[0].getLocReg());
20490b57cec5SDimitry Andric       UsedRegs.push_back(RVLocs[1].getLocReg());
20500b57cec5SDimitry Andric 
20510b57cec5SDimitry Andric       // Finally update the result.
20520b57cec5SDimitry Andric       updateValueMap(I, ResultReg);
20530b57cec5SDimitry Andric     } else {
20540b57cec5SDimitry Andric       assert(RVLocs.size() == 1 && "Can't handle non-double multi-reg retvals!");
20550b57cec5SDimitry Andric       MVT CopyVT = RVLocs[0].getValVT();
20560b57cec5SDimitry Andric 
20570b57cec5SDimitry Andric       // Special handling for extended integers.
20580b57cec5SDimitry Andric       if (RetVT == MVT::i1 || RetVT == MVT::i8 || RetVT == MVT::i16)
20590b57cec5SDimitry Andric         CopyVT = MVT::i32;
20600b57cec5SDimitry Andric 
20610b57cec5SDimitry Andric       const TargetRegisterClass* DstRC = TLI.getRegClassFor(CopyVT);
20620b57cec5SDimitry Andric 
20638bcb0991SDimitry Andric       Register ResultReg = createResultReg(DstRC);
2064bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
20650b57cec5SDimitry Andric               TII.get(TargetOpcode::COPY),
20660b57cec5SDimitry Andric               ResultReg).addReg(RVLocs[0].getLocReg());
20670b57cec5SDimitry Andric       UsedRegs.push_back(RVLocs[0].getLocReg());
20680b57cec5SDimitry Andric 
20690b57cec5SDimitry Andric       // Finally update the result.
20700b57cec5SDimitry Andric       updateValueMap(I, ResultReg);
20710b57cec5SDimitry Andric     }
20720b57cec5SDimitry Andric   }
20730b57cec5SDimitry Andric 
20740b57cec5SDimitry Andric   return true;
20750b57cec5SDimitry Andric }
20760b57cec5SDimitry Andric 
20770b57cec5SDimitry Andric bool ARMFastISel::SelectRet(const Instruction *I) {
20780b57cec5SDimitry Andric   const ReturnInst *Ret = cast<ReturnInst>(I);
20790b57cec5SDimitry Andric   const Function &F = *I->getParent()->getParent();
20805ffd83dbSDimitry Andric   const bool IsCmseNSEntry = F.hasFnAttribute("cmse_nonsecure_entry");
20810b57cec5SDimitry Andric 
20820b57cec5SDimitry Andric   if (!FuncInfo.CanLowerReturn)
20830b57cec5SDimitry Andric     return false;
20840b57cec5SDimitry Andric 
20850b57cec5SDimitry Andric   if (TLI.supportSwiftError() &&
20860b57cec5SDimitry Andric       F.getAttributes().hasAttrSomewhere(Attribute::SwiftError))
20870b57cec5SDimitry Andric     return false;
20880b57cec5SDimitry Andric 
20890b57cec5SDimitry Andric   if (TLI.supportSplitCSR(FuncInfo.MF))
20900b57cec5SDimitry Andric     return false;
20910b57cec5SDimitry Andric 
20920b57cec5SDimitry Andric   // Build a list of return value registers.
20930b57cec5SDimitry Andric   SmallVector<unsigned, 4> RetRegs;
20940b57cec5SDimitry Andric 
20950b57cec5SDimitry Andric   CallingConv::ID CC = F.getCallingConv();
20960b57cec5SDimitry Andric   if (Ret->getNumOperands() > 0) {
20970b57cec5SDimitry Andric     SmallVector<ISD::OutputArg, 4> Outs;
20980b57cec5SDimitry Andric     GetReturnInfo(CC, F.getReturnType(), F.getAttributes(), Outs, TLI, DL);
20990b57cec5SDimitry Andric 
21000b57cec5SDimitry Andric     // Analyze operands of the call, assigning locations to each operand.
21010b57cec5SDimitry Andric     SmallVector<CCValAssign, 16> ValLocs;
21020b57cec5SDimitry Andric     CCState CCInfo(CC, F.isVarArg(), *FuncInfo.MF, ValLocs, I->getContext());
21030b57cec5SDimitry Andric     CCInfo.AnalyzeReturn(Outs, CCAssignFnForCall(CC, true /* is Ret */,
21040b57cec5SDimitry Andric                                                  F.isVarArg()));
21050b57cec5SDimitry Andric 
21060b57cec5SDimitry Andric     const Value *RV = Ret->getOperand(0);
210704eeddc0SDimitry Andric     Register Reg = getRegForValue(RV);
21080b57cec5SDimitry Andric     if (Reg == 0)
21090b57cec5SDimitry Andric       return false;
21100b57cec5SDimitry Andric 
21110b57cec5SDimitry Andric     // Only handle a single return value for now.
21120b57cec5SDimitry Andric     if (ValLocs.size() != 1)
21130b57cec5SDimitry Andric       return false;
21140b57cec5SDimitry Andric 
21150b57cec5SDimitry Andric     CCValAssign &VA = ValLocs[0];
21160b57cec5SDimitry Andric 
21170b57cec5SDimitry Andric     // Don't bother handling odd stuff for now.
21180b57cec5SDimitry Andric     if (VA.getLocInfo() != CCValAssign::Full)
21190b57cec5SDimitry Andric       return false;
21200b57cec5SDimitry Andric     // Only handle register returns for now.
21210b57cec5SDimitry Andric     if (!VA.isRegLoc())
21220b57cec5SDimitry Andric       return false;
21230b57cec5SDimitry Andric 
21240b57cec5SDimitry Andric     unsigned SrcReg = Reg + VA.getValNo();
21250b57cec5SDimitry Andric     EVT RVEVT = TLI.getValueType(DL, RV->getType());
21260b57cec5SDimitry Andric     if (!RVEVT.isSimple()) return false;
21270b57cec5SDimitry Andric     MVT RVVT = RVEVT.getSimpleVT();
21280b57cec5SDimitry Andric     MVT DestVT = VA.getValVT();
21290b57cec5SDimitry Andric     // Special handling for extended integers.
21300b57cec5SDimitry Andric     if (RVVT != DestVT) {
21310b57cec5SDimitry Andric       if (RVVT != MVT::i1 && RVVT != MVT::i8 && RVVT != MVT::i16)
21320b57cec5SDimitry Andric         return false;
21330b57cec5SDimitry Andric 
21340b57cec5SDimitry Andric       assert(DestVT == MVT::i32 && "ARM should always ext to i32");
21350b57cec5SDimitry Andric 
21360b57cec5SDimitry Andric       // Perform extension if flagged as either zext or sext.  Otherwise, do
21370b57cec5SDimitry Andric       // nothing.
21380b57cec5SDimitry Andric       if (Outs[0].Flags.isZExt() || Outs[0].Flags.isSExt()) {
21390b57cec5SDimitry Andric         SrcReg = ARMEmitIntExt(RVVT, SrcReg, DestVT, Outs[0].Flags.isZExt());
21400b57cec5SDimitry Andric         if (SrcReg == 0) return false;
21410b57cec5SDimitry Andric       }
21420b57cec5SDimitry Andric     }
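    // (Example, illustrative: returning an i8 that the ABI marks zeroext is
    // widened to i32 via ARMEmitIntExt above before being copied into the
    // return register.)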
21430b57cec5SDimitry Andric 
21440b57cec5SDimitry Andric     // Make the copy.
21458bcb0991SDimitry Andric     Register DstReg = VA.getLocReg();
21460b57cec5SDimitry Andric     const TargetRegisterClass* SrcRC = MRI.getRegClass(SrcReg);
21470b57cec5SDimitry Andric     // Avoid a cross-class copy. This is very unlikely.
21480b57cec5SDimitry Andric     if (!SrcRC->contains(DstReg))
21490b57cec5SDimitry Andric       return false;
2150bdd1243dSDimitry Andric     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
21510b57cec5SDimitry Andric             TII.get(TargetOpcode::COPY), DstReg).addReg(SrcReg);
21520b57cec5SDimitry Andric 
21530b57cec5SDimitry Andric     // Add register to return instruction.
21540b57cec5SDimitry Andric     RetRegs.push_back(VA.getLocReg());
21550b57cec5SDimitry Andric   }
21560b57cec5SDimitry Andric 
21575ffd83dbSDimitry Andric   unsigned RetOpc;
21585ffd83dbSDimitry Andric   if (IsCmseNSEntry)
21595ffd83dbSDimitry Andric     if (isThumb2)
21605ffd83dbSDimitry Andric       RetOpc = ARM::tBXNS_RET;
21615ffd83dbSDimitry Andric     else
21625ffd83dbSDimitry Andric       llvm_unreachable("CMSE not valid for non-Thumb targets");
21635ffd83dbSDimitry Andric   else
21645ffd83dbSDimitry Andric     RetOpc = Subtarget->getReturnOpcode();
21655ffd83dbSDimitry Andric 
2166bdd1243dSDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
21675ffd83dbSDimitry Andric                                     TII.get(RetOpc));
21680b57cec5SDimitry Andric   AddOptionalDefs(MIB);
21690b57cec5SDimitry Andric   for (unsigned R : RetRegs)
21700b57cec5SDimitry Andric     MIB.addReg(R, RegState::Implicit);
21710b57cec5SDimitry Andric   return true;
21720b57cec5SDimitry Andric }
21730b57cec5SDimitry Andric 
21740b57cec5SDimitry Andric unsigned ARMFastISel::ARMSelectCallOp(bool UseReg) {
21750b57cec5SDimitry Andric   if (UseReg)
2176e8d8bef9SDimitry Andric     return isThumb2 ? gettBLXrOpcode(*MF) : getBLXOpcode(*MF);
21770b57cec5SDimitry Andric   else
21780b57cec5SDimitry Andric     return isThumb2 ? ARM::tBL : ARM::BL;
21790b57cec5SDimitry Andric }
21800b57cec5SDimitry Andric 
21810b57cec5SDimitry Andric unsigned ARMFastISel::getLibcallReg(const Twine &Name) {
21820b57cec5SDimitry Andric   // Manually compute the global's type to avoid building it when unnecessary.
21835f757f3fSDimitry Andric   Type *GVTy = PointerType::get(*Context, /*AS=*/0);
21840b57cec5SDimitry Andric   EVT LCREVT = TLI.getValueType(DL, GVTy);
21850b57cec5SDimitry Andric   if (!LCREVT.isSimple()) return 0;
21860b57cec5SDimitry Andric 
2187fe6060f1SDimitry Andric   GlobalValue *GV = M.getNamedGlobal(Name.str());
2188fe6060f1SDimitry Andric   if (!GV)
2189fe6060f1SDimitry Andric     GV = new GlobalVariable(M, Type::getInt32Ty(*Context), false,
2190fe6060f1SDimitry Andric                             GlobalValue::ExternalLinkage, nullptr, Name);
2191fe6060f1SDimitry Andric 
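  // (Note, illustrative: the synthetic external global declared above exists
  // only so ARMMaterializeGV can produce an address for the libcall symbol,
  // e.g. when long calls are in use; it never gets an initializer.)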
21920b57cec5SDimitry Andric   return ARMMaterializeGV(GV, LCREVT.getSimpleVT());
21930b57cec5SDimitry Andric }
21940b57cec5SDimitry Andric 
21950b57cec5SDimitry Andric // A quick function that will emit a call for a named libcall with the vector
21960b57cec5SDimitry Andric // of passed arguments for the Instruction in I. We can assume that we
21970b57cec5SDimitry Andric // can emit a call for any libcall we can produce. This is an abridged version
21980b57cec5SDimitry Andric // of the full call infrastructure since we won't need to worry about things
21990b57cec5SDimitry Andric // like computed function pointers or strange arguments at call sites.
22000b57cec5SDimitry Andric // TODO: Try to unify this and the normal call bits for ARM, then try to unify
22010b57cec5SDimitry Andric // with X86.
22020b57cec5SDimitry Andric bool ARMFastISel::ARMEmitLibcall(const Instruction *I, RTLIB::Libcall Call) {
22030b57cec5SDimitry Andric   CallingConv::ID CC = TLI.getLibcallCallingConv(Call);
22040b57cec5SDimitry Andric 
22050b57cec5SDimitry Andric   // Handle *simple* calls for now.
22060b57cec5SDimitry Andric   Type *RetTy = I->getType();
22070b57cec5SDimitry Andric   MVT RetVT;
22080b57cec5SDimitry Andric   if (RetTy->isVoidTy())
22090b57cec5SDimitry Andric     RetVT = MVT::isVoid;
22100b57cec5SDimitry Andric   else if (!isTypeLegal(RetTy, RetVT))
22110b57cec5SDimitry Andric     return false;
22120b57cec5SDimitry Andric 
22130b57cec5SDimitry Andric   // Can't handle non-double multi-reg retvals.
22140b57cec5SDimitry Andric   if (RetVT != MVT::isVoid && RetVT != MVT::i32) {
22150b57cec5SDimitry Andric     SmallVector<CCValAssign, 16> RVLocs;
22160b57cec5SDimitry Andric     CCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
22170b57cec5SDimitry Andric     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, false));
22180b57cec5SDimitry Andric     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
22190b57cec5SDimitry Andric       return false;
22200b57cec5SDimitry Andric   }
22210b57cec5SDimitry Andric 
22220b57cec5SDimitry Andric   // Set up the argument vectors.
22230b57cec5SDimitry Andric   SmallVector<Value*, 8> Args;
22248bcb0991SDimitry Andric   SmallVector<Register, 8> ArgRegs;
22250b57cec5SDimitry Andric   SmallVector<MVT, 8> ArgVTs;
22260b57cec5SDimitry Andric   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
22270b57cec5SDimitry Andric   Args.reserve(I->getNumOperands());
22280b57cec5SDimitry Andric   ArgRegs.reserve(I->getNumOperands());
22290b57cec5SDimitry Andric   ArgVTs.reserve(I->getNumOperands());
22300b57cec5SDimitry Andric   ArgFlags.reserve(I->getNumOperands());
22310b57cec5SDimitry Andric   for (Value *Op :  I->operands()) {
223204eeddc0SDimitry Andric     Register Arg = getRegForValue(Op);
22330b57cec5SDimitry Andric     if (Arg == 0) return false;
22340b57cec5SDimitry Andric 
22350b57cec5SDimitry Andric     Type *ArgTy = Op->getType();
22360b57cec5SDimitry Andric     MVT ArgVT;
22370b57cec5SDimitry Andric     if (!isTypeLegal(ArgTy, ArgVT)) return false;
22380b57cec5SDimitry Andric 
22390b57cec5SDimitry Andric     ISD::ArgFlagsTy Flags;
22405ffd83dbSDimitry Andric     Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
22410b57cec5SDimitry Andric 
22420b57cec5SDimitry Andric     Args.push_back(Op);
22430b57cec5SDimitry Andric     ArgRegs.push_back(Arg);
22440b57cec5SDimitry Andric     ArgVTs.push_back(ArgVT);
22450b57cec5SDimitry Andric     ArgFlags.push_back(Flags);
22460b57cec5SDimitry Andric   }
22470b57cec5SDimitry Andric 
22480b57cec5SDimitry Andric   // Handle the arguments now that we've gotten them.
22498bcb0991SDimitry Andric   SmallVector<Register, 4> RegArgs;
22500b57cec5SDimitry Andric   unsigned NumBytes;
22510b57cec5SDimitry Andric   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
22520b57cec5SDimitry Andric                        RegArgs, CC, NumBytes, false))
22530b57cec5SDimitry Andric     return false;
22540b57cec5SDimitry Andric 
22558bcb0991SDimitry Andric   Register CalleeReg;
22560b57cec5SDimitry Andric   if (Subtarget->genLongCalls()) {
22570b57cec5SDimitry Andric     CalleeReg = getLibcallReg(TLI.getLibcallName(Call));
22580b57cec5SDimitry Andric     if (CalleeReg == 0) return false;
22590b57cec5SDimitry Andric   }
22600b57cec5SDimitry Andric 
22610b57cec5SDimitry Andric   // Issue the call.
22620b57cec5SDimitry Andric   unsigned CallOpc = ARMSelectCallOp(Subtarget->genLongCalls());
22630b57cec5SDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2264bdd1243dSDimitry Andric                                     MIMD, TII.get(CallOpc));
22650b57cec5SDimitry Andric   // BL / BLX don't take a predicate, but tBL / tBLX do.
22660b57cec5SDimitry Andric   if (isThumb2)
22670b57cec5SDimitry Andric     MIB.add(predOps(ARMCC::AL));
2268e8d8bef9SDimitry Andric   if (Subtarget->genLongCalls()) {
2269e8d8bef9SDimitry Andric     CalleeReg =
2270e8d8bef9SDimitry Andric         constrainOperandRegClass(TII.get(CallOpc), CalleeReg, isThumb2 ? 2 : 0);
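    // (Note, illustrative: the callee register is operand 2 for tBLX because
    // the two predicate operands added above come first; for ARM-mode BLX it
    // is operand 0.)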
22710b57cec5SDimitry Andric     MIB.addReg(CalleeReg);
2272e8d8bef9SDimitry Andric   } else
22730b57cec5SDimitry Andric     MIB.addExternalSymbol(TLI.getLibcallName(Call));
22740b57cec5SDimitry Andric 
22750b57cec5SDimitry Andric   // Add implicit physical register uses to the call.
22768bcb0991SDimitry Andric   for (Register R : RegArgs)
22770b57cec5SDimitry Andric     MIB.addReg(R, RegState::Implicit);
22780b57cec5SDimitry Andric 
22790b57cec5SDimitry Andric   // Add a register mask with the call-preserved registers.
22800b57cec5SDimitry Andric   // Proper defs for return values will be added by setPhysRegsDeadExcept().
22810b57cec5SDimitry Andric   MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
22820b57cec5SDimitry Andric 
22830b57cec5SDimitry Andric   // Finish off the call including any return values.
22848bcb0991SDimitry Andric   SmallVector<Register, 4> UsedRegs;
22850b57cec5SDimitry Andric   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, false)) return false;
22860b57cec5SDimitry Andric 
22870b57cec5SDimitry Andric   // Set all unused physreg defs as dead.
22880b57cec5SDimitry Andric   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
22890b57cec5SDimitry Andric 
22900b57cec5SDimitry Andric   return true;
22910b57cec5SDimitry Andric }
22920b57cec5SDimitry Andric 
22930b57cec5SDimitry Andric bool ARMFastISel::SelectCall(const Instruction *I,
22940b57cec5SDimitry Andric                              const char *IntrMemName = nullptr) {
22950b57cec5SDimitry Andric   const CallInst *CI = cast<CallInst>(I);
22965ffd83dbSDimitry Andric   const Value *Callee = CI->getCalledOperand();
22970b57cec5SDimitry Andric 
22980b57cec5SDimitry Andric   // Can't handle inline asm.
22990b57cec5SDimitry Andric   if (isa<InlineAsm>(Callee)) return false;
23000b57cec5SDimitry Andric 
23010b57cec5SDimitry Andric   // Allow SelectionDAG isel to handle tail calls.
23020b57cec5SDimitry Andric   if (CI->isTailCall()) return false;
23030b57cec5SDimitry Andric 
23040b57cec5SDimitry Andric   // Check the calling convention.
23055ffd83dbSDimitry Andric   CallingConv::ID CC = CI->getCallingConv();
23060b57cec5SDimitry Andric 
23070b57cec5SDimitry Andric   // TODO: Avoid some calling conventions?
23080b57cec5SDimitry Andric 
23095ffd83dbSDimitry Andric   FunctionType *FTy = CI->getFunctionType();
23100b57cec5SDimitry Andric   bool isVarArg = FTy->isVarArg();
23110b57cec5SDimitry Andric 
23120b57cec5SDimitry Andric   // Handle *simple* calls for now.
23130b57cec5SDimitry Andric   Type *RetTy = I->getType();
23140b57cec5SDimitry Andric   MVT RetVT;
23150b57cec5SDimitry Andric   if (RetTy->isVoidTy())
23160b57cec5SDimitry Andric     RetVT = MVT::isVoid;
23170b57cec5SDimitry Andric   else if (!isTypeLegal(RetTy, RetVT) && RetVT != MVT::i16 &&
23180b57cec5SDimitry Andric            RetVT != MVT::i8  && RetVT != MVT::i1)
23190b57cec5SDimitry Andric     return false;
23200b57cec5SDimitry Andric 
23210b57cec5SDimitry Andric   // Can't handle non-double multi-reg retvals.
23220b57cec5SDimitry Andric   if (RetVT != MVT::isVoid && RetVT != MVT::i1 && RetVT != MVT::i8 &&
23230b57cec5SDimitry Andric       RetVT != MVT::i16 && RetVT != MVT::i32) {
23240b57cec5SDimitry Andric     SmallVector<CCValAssign, 16> RVLocs;
23250b57cec5SDimitry Andric     CCState CCInfo(CC, isVarArg, *FuncInfo.MF, RVLocs, *Context);
23260b57cec5SDimitry Andric     CCInfo.AnalyzeCallResult(RetVT, CCAssignFnForCall(CC, true, isVarArg));
23270b57cec5SDimitry Andric     if (RVLocs.size() >= 2 && RetVT != MVT::f64)
23280b57cec5SDimitry Andric       return false;
23290b57cec5SDimitry Andric   }
23300b57cec5SDimitry Andric 
23310b57cec5SDimitry Andric   // Set up the argument vectors.
23320b57cec5SDimitry Andric   SmallVector<Value*, 8> Args;
23338bcb0991SDimitry Andric   SmallVector<Register, 8> ArgRegs;
23340b57cec5SDimitry Andric   SmallVector<MVT, 8> ArgVTs;
23350b57cec5SDimitry Andric   SmallVector<ISD::ArgFlagsTy, 8> ArgFlags;
23365ffd83dbSDimitry Andric   unsigned arg_size = CI->arg_size();
23370b57cec5SDimitry Andric   Args.reserve(arg_size);
23380b57cec5SDimitry Andric   ArgRegs.reserve(arg_size);
23390b57cec5SDimitry Andric   ArgVTs.reserve(arg_size);
23400b57cec5SDimitry Andric   ArgFlags.reserve(arg_size);
23415ffd83dbSDimitry Andric   for (auto ArgI = CI->arg_begin(), ArgE = CI->arg_end(); ArgI != ArgE; ++ArgI) {
23420b57cec5SDimitry Andric     // If we're lowering a memory intrinsic instead of a regular call, skip the
23430b57cec5SDimitry Andric     // last argument, which shouldn't be passed to the underlying function.
23445ffd83dbSDimitry Andric     if (IntrMemName && ArgE - ArgI <= 1)
23450b57cec5SDimitry Andric       break;
23460b57cec5SDimitry Andric 
23470b57cec5SDimitry Andric     ISD::ArgFlagsTy Flags;
23485ffd83dbSDimitry Andric     unsigned ArgIdx = ArgI - CI->arg_begin();
23495ffd83dbSDimitry Andric     if (CI->paramHasAttr(ArgIdx, Attribute::SExt))
23500b57cec5SDimitry Andric       Flags.setSExt();
23515ffd83dbSDimitry Andric     if (CI->paramHasAttr(ArgIdx, Attribute::ZExt))
23520b57cec5SDimitry Andric       Flags.setZExt();
23530b57cec5SDimitry Andric 
23540b57cec5SDimitry Andric     // FIXME: Only handle *easy* calls for now.
23555ffd83dbSDimitry Andric     if (CI->paramHasAttr(ArgIdx, Attribute::InReg) ||
23565ffd83dbSDimitry Andric         CI->paramHasAttr(ArgIdx, Attribute::StructRet) ||
23575ffd83dbSDimitry Andric         CI->paramHasAttr(ArgIdx, Attribute::SwiftSelf) ||
23585ffd83dbSDimitry Andric         CI->paramHasAttr(ArgIdx, Attribute::SwiftError) ||
23595ffd83dbSDimitry Andric         CI->paramHasAttr(ArgIdx, Attribute::Nest) ||
23605ffd83dbSDimitry Andric         CI->paramHasAttr(ArgIdx, Attribute::ByVal))
23610b57cec5SDimitry Andric       return false;
23620b57cec5SDimitry Andric 
23635ffd83dbSDimitry Andric     Type *ArgTy = (*ArgI)->getType();
23640b57cec5SDimitry Andric     MVT ArgVT;
23650b57cec5SDimitry Andric     if (!isTypeLegal(ArgTy, ArgVT) && ArgVT != MVT::i16 && ArgVT != MVT::i8 &&
23660b57cec5SDimitry Andric         ArgVT != MVT::i1)
23670b57cec5SDimitry Andric       return false;
23680b57cec5SDimitry Andric 
23695ffd83dbSDimitry Andric     Register Arg = getRegForValue(*ArgI);
23708bcb0991SDimitry Andric     if (!Arg.isValid())
23710b57cec5SDimitry Andric       return false;
23720b57cec5SDimitry Andric 
23735ffd83dbSDimitry Andric     Flags.setOrigAlign(DL.getABITypeAlign(ArgTy));
23740b57cec5SDimitry Andric 
23755ffd83dbSDimitry Andric     Args.push_back(*ArgI);
23760b57cec5SDimitry Andric     ArgRegs.push_back(Arg);
23770b57cec5SDimitry Andric     ArgVTs.push_back(ArgVT);
23780b57cec5SDimitry Andric     ArgFlags.push_back(Flags);
23790b57cec5SDimitry Andric   }
23800b57cec5SDimitry Andric 
23810b57cec5SDimitry Andric   // Handle the arguments now that we've gotten them.
23828bcb0991SDimitry Andric   SmallVector<Register, 4> RegArgs;
23830b57cec5SDimitry Andric   unsigned NumBytes;
23840b57cec5SDimitry Andric   if (!ProcessCallArgs(Args, ArgRegs, ArgVTs, ArgFlags,
23850b57cec5SDimitry Andric                        RegArgs, CC, NumBytes, isVarArg))
23860b57cec5SDimitry Andric     return false;
23870b57cec5SDimitry Andric 
23880b57cec5SDimitry Andric   bool UseReg = false;
23890b57cec5SDimitry Andric   const GlobalValue *GV = dyn_cast<GlobalValue>(Callee);
23900b57cec5SDimitry Andric   if (!GV || Subtarget->genLongCalls()) UseReg = true;
23910b57cec5SDimitry Andric 
23928bcb0991SDimitry Andric   Register CalleeReg;
23930b57cec5SDimitry Andric   if (UseReg) {
23940b57cec5SDimitry Andric     if (IntrMemName)
23950b57cec5SDimitry Andric       CalleeReg = getLibcallReg(IntrMemName);
23960b57cec5SDimitry Andric     else
23970b57cec5SDimitry Andric       CalleeReg = getRegForValue(Callee);
23980b57cec5SDimitry Andric 
23990b57cec5SDimitry Andric     if (CalleeReg == 0) return false;
24000b57cec5SDimitry Andric   }
24010b57cec5SDimitry Andric 
24020b57cec5SDimitry Andric   // Issue the call.
24030b57cec5SDimitry Andric   unsigned CallOpc = ARMSelectCallOp(UseReg);
24040b57cec5SDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
2405bdd1243dSDimitry Andric                                     MIMD, TII.get(CallOpc));
24060b57cec5SDimitry Andric 
24070b57cec5SDimitry Andric   // ARM calls don't take a predicate, but tBL / tBLX do.
24080b57cec5SDimitry Andric   if(isThumb2)
24090b57cec5SDimitry Andric     MIB.add(predOps(ARMCC::AL));
2410e8d8bef9SDimitry Andric   if (UseReg) {
2411e8d8bef9SDimitry Andric     CalleeReg =
2412e8d8bef9SDimitry Andric         constrainOperandRegClass(TII.get(CallOpc), CalleeReg, isThumb2 ? 2 : 0);
24130b57cec5SDimitry Andric     MIB.addReg(CalleeReg);
2414e8d8bef9SDimitry Andric   } else if (!IntrMemName)
24150b57cec5SDimitry Andric     MIB.addGlobalAddress(GV, 0, 0);
24160b57cec5SDimitry Andric   else
24170b57cec5SDimitry Andric     MIB.addExternalSymbol(IntrMemName, 0);
24180b57cec5SDimitry Andric 
24190b57cec5SDimitry Andric   // Add implicit physical register uses to the call.
24208bcb0991SDimitry Andric   for (Register R : RegArgs)
24210b57cec5SDimitry Andric     MIB.addReg(R, RegState::Implicit);
24220b57cec5SDimitry Andric 
24230b57cec5SDimitry Andric   // Add a register mask with the call-preserved registers.
24240b57cec5SDimitry Andric   // Proper defs for return values will be added by setPhysRegsDeadExcept().
24250b57cec5SDimitry Andric   MIB.addRegMask(TRI.getCallPreservedMask(*FuncInfo.MF, CC));
24260b57cec5SDimitry Andric 
24270b57cec5SDimitry Andric   // Finish off the call including any return values.
24288bcb0991SDimitry Andric   SmallVector<Register, 4> UsedRegs;
24290b57cec5SDimitry Andric   if (!FinishCall(RetVT, UsedRegs, I, CC, NumBytes, isVarArg))
24300b57cec5SDimitry Andric     return false;
24310b57cec5SDimitry Andric 
24320b57cec5SDimitry Andric   // Set all unused physreg defs as dead.
24330b57cec5SDimitry Andric   static_cast<MachineInstr *>(MIB)->setPhysRegsDeadExcept(UsedRegs, TRI);
24340b57cec5SDimitry Andric 
24350b57cec5SDimitry Andric   return true;
24360b57cec5SDimitry Andric }
24370b57cec5SDimitry Andric 
24380b57cec5SDimitry Andric bool ARMFastISel::ARMIsMemCpySmall(uint64_t Len) {
24390b57cec5SDimitry Andric   return Len <= 16;
24400b57cec5SDimitry Andric }
24410b57cec5SDimitry Andric 
2442bdd1243dSDimitry Andric bool ARMFastISel::ARMTryEmitSmallMemCpy(Address Dest, Address Src, uint64_t Len,
2443bdd1243dSDimitry Andric                                         MaybeAlign Alignment) {
24440b57cec5SDimitry Andric   // Make sure we don't bloat code by inlining very large memcpy's.
24450b57cec5SDimitry Andric   if (!ARMIsMemCpySmall(Len))
24460b57cec5SDimitry Andric     return false;
24470b57cec5SDimitry Andric 
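  // (Worked example, illustrative: a 7-byte copy with alignment >= 4 is
  // decomposed by the loop below into an i32, then an i16, then an i8
  // load/store pair.)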
24480b57cec5SDimitry Andric   while (Len) {
24490b57cec5SDimitry Andric     MVT VT;
2450bdd1243dSDimitry Andric     if (!Alignment || *Alignment >= 4) {
24510b57cec5SDimitry Andric       if (Len >= 4)
24520b57cec5SDimitry Andric         VT = MVT::i32;
24530b57cec5SDimitry Andric       else if (Len >= 2)
24540b57cec5SDimitry Andric         VT = MVT::i16;
24550b57cec5SDimitry Andric       else {
24560b57cec5SDimitry Andric         assert(Len == 1 && "Expected a length of 1!");
24570b57cec5SDimitry Andric         VT = MVT::i8;
24580b57cec5SDimitry Andric       }
24590b57cec5SDimitry Andric     } else {
2460bdd1243dSDimitry Andric       assert(Alignment && "Alignment is set in this branch");
24610b57cec5SDimitry Andric       // Bound based on alignment.
2462bdd1243dSDimitry Andric       if (Len >= 2 && *Alignment == 2)
24630b57cec5SDimitry Andric         VT = MVT::i16;
24640b57cec5SDimitry Andric       else {
24650b57cec5SDimitry Andric         VT = MVT::i8;
24660b57cec5SDimitry Andric       }
24670b57cec5SDimitry Andric     }
24680b57cec5SDimitry Andric 
24690b57cec5SDimitry Andric     bool RV;
24708bcb0991SDimitry Andric     Register ResultReg;
24710b57cec5SDimitry Andric     RV = ARMEmitLoad(VT, ResultReg, Src);
24720b57cec5SDimitry Andric     assert(RV && "Should be able to handle this load.");
24730b57cec5SDimitry Andric     RV = ARMEmitStore(VT, ResultReg, Dest);
24740b57cec5SDimitry Andric     assert(RV && "Should be able to handle this store.");
24750b57cec5SDimitry Andric     (void)RV;
24760b57cec5SDimitry Andric 
24770b57cec5SDimitry Andric     unsigned Size = VT.getSizeInBits()/8;
24780b57cec5SDimitry Andric     Len -= Size;
24790b57cec5SDimitry Andric     Dest.Offset += Size;
24800b57cec5SDimitry Andric     Src.Offset += Size;
24810b57cec5SDimitry Andric   }
24820b57cec5SDimitry Andric 
24830b57cec5SDimitry Andric   return true;
24840b57cec5SDimitry Andric }
24850b57cec5SDimitry Andric 
24860b57cec5SDimitry Andric bool ARMFastISel::SelectIntrinsicCall(const IntrinsicInst &I) {
24870b57cec5SDimitry Andric   // FIXME: Handle more intrinsics.
24880b57cec5SDimitry Andric   switch (I.getIntrinsicID()) {
24890b57cec5SDimitry Andric   default: return false;
24900b57cec5SDimitry Andric   case Intrinsic::frameaddress: {
24910b57cec5SDimitry Andric     MachineFrameInfo &MFI = FuncInfo.MF->getFrameInfo();
24920b57cec5SDimitry Andric     MFI.setFrameAddressIsTaken(true);
24930b57cec5SDimitry Andric 
24940b57cec5SDimitry Andric     unsigned LdrOpc = isThumb2 ? ARM::t2LDRi12 : ARM::LDRi12;
24950b57cec5SDimitry Andric     const TargetRegisterClass *RC = isThumb2 ? &ARM::tGPRRegClass
24960b57cec5SDimitry Andric                                              : &ARM::GPRRegClass;
24970b57cec5SDimitry Andric 
24980b57cec5SDimitry Andric     const ARMBaseRegisterInfo *RegInfo =
24990b57cec5SDimitry Andric         static_cast<const ARMBaseRegisterInfo *>(Subtarget->getRegisterInfo());
25008bcb0991SDimitry Andric     Register FramePtr = RegInfo->getFrameRegister(*(FuncInfo.MF));
25010b57cec5SDimitry Andric     unsigned SrcReg = FramePtr;
25020b57cec5SDimitry Andric 
25030b57cec5SDimitry Andric     // Recursively load frame address
25040b57cec5SDimitry Andric     // ldr r0 [fp]
25050b57cec5SDimitry Andric     // ldr r0 [r0]
25060b57cec5SDimitry Andric     // ldr r0 [r0]
25070b57cec5SDimitry Andric     // ...
25080b57cec5SDimitry Andric     unsigned DestReg;
25090b57cec5SDimitry Andric     unsigned Depth = cast<ConstantInt>(I.getOperand(0))->getZExtValue();
25100b57cec5SDimitry Andric     while (Depth--) {
25110b57cec5SDimitry Andric       DestReg = createResultReg(RC);
2512bdd1243dSDimitry Andric       AddOptionalDefs(BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
25130b57cec5SDimitry Andric                               TII.get(LdrOpc), DestReg)
25140b57cec5SDimitry Andric                       .addReg(SrcReg).addImm(0));
25150b57cec5SDimitry Andric       SrcReg = DestReg;
25160b57cec5SDimitry Andric     }
25170b57cec5SDimitry Andric     updateValueMap(&I, SrcReg);
25180b57cec5SDimitry Andric     return true;
25190b57cec5SDimitry Andric   }
25200b57cec5SDimitry Andric   case Intrinsic::memcpy:
25210b57cec5SDimitry Andric   case Intrinsic::memmove: {
25220b57cec5SDimitry Andric     const MemTransferInst &MTI = cast<MemTransferInst>(I);
25230b57cec5SDimitry Andric     // Don't handle volatile.
25240b57cec5SDimitry Andric     if (MTI.isVolatile())
25250b57cec5SDimitry Andric       return false;
25260b57cec5SDimitry Andric 
25270b57cec5SDimitry Andric     // Disable inlining for memmove before calls to ComputeAddress.  Otherwise,
25280b57cec5SDimitry Andric     // we would emit dead code because we don't currently handle memmoves.
25290b57cec5SDimitry Andric     bool isMemCpy = (I.getIntrinsicID() == Intrinsic::memcpy);
25300b57cec5SDimitry Andric     if (isa<ConstantInt>(MTI.getLength()) && isMemCpy) {
25310b57cec5SDimitry Andric       // Small memcpy's are common enough that we want to do them without a call
25320b57cec5SDimitry Andric       // if possible.
25330b57cec5SDimitry Andric       uint64_t Len = cast<ConstantInt>(MTI.getLength())->getZExtValue();
25340b57cec5SDimitry Andric       if (ARMIsMemCpySmall(Len)) {
25350b57cec5SDimitry Andric         Address Dest, Src;
25360b57cec5SDimitry Andric         if (!ARMComputeAddress(MTI.getRawDest(), Dest) ||
25370b57cec5SDimitry Andric             !ARMComputeAddress(MTI.getRawSource(), Src))
25380b57cec5SDimitry Andric           return false;
2539bdd1243dSDimitry Andric         MaybeAlign Alignment;
2540bdd1243dSDimitry Andric         if (MTI.getDestAlign() || MTI.getSourceAlign())
2541bdd1243dSDimitry Andric           Alignment = std::min(MTI.getDestAlign().valueOrOne(),
2542bdd1243dSDimitry Andric                                MTI.getSourceAlign().valueOrOne());
25430b57cec5SDimitry Andric         if (ARMTryEmitSmallMemCpy(Dest, Src, Len, Alignment))
25440b57cec5SDimitry Andric           return true;
25450b57cec5SDimitry Andric       }
25460b57cec5SDimitry Andric     }
25470b57cec5SDimitry Andric 
25480b57cec5SDimitry Andric     if (!MTI.getLength()->getType()->isIntegerTy(32))
25490b57cec5SDimitry Andric       return false;
25500b57cec5SDimitry Andric 
25510b57cec5SDimitry Andric     if (MTI.getSourceAddressSpace() > 255 || MTI.getDestAddressSpace() > 255)
25520b57cec5SDimitry Andric       return false;
25530b57cec5SDimitry Andric 
25540b57cec5SDimitry Andric     const char *IntrMemName = isa<MemCpyInst>(I) ? "memcpy" : "memmove";
25550b57cec5SDimitry Andric     return SelectCall(&I, IntrMemName);
25560b57cec5SDimitry Andric   }
25570b57cec5SDimitry Andric   case Intrinsic::memset: {
25580b57cec5SDimitry Andric     const MemSetInst &MSI = cast<MemSetInst>(I);
25590b57cec5SDimitry Andric     // Don't handle volatile.
25600b57cec5SDimitry Andric     if (MSI.isVolatile())
25610b57cec5SDimitry Andric       return false;
25620b57cec5SDimitry Andric 
25630b57cec5SDimitry Andric     if (!MSI.getLength()->getType()->isIntegerTy(32))
25640b57cec5SDimitry Andric       return false;
25650b57cec5SDimitry Andric 
25660b57cec5SDimitry Andric     if (MSI.getDestAddressSpace() > 255)
25670b57cec5SDimitry Andric       return false;
25680b57cec5SDimitry Andric 
25690b57cec5SDimitry Andric     return SelectCall(&I, "memset");
25700b57cec5SDimitry Andric   }
25710b57cec5SDimitry Andric   case Intrinsic::trap: {
2572480093f4SDimitry Andric     unsigned Opcode;
2573480093f4SDimitry Andric     if (Subtarget->isThumb())
2574480093f4SDimitry Andric       Opcode = ARM::tTRAP;
2575480093f4SDimitry Andric     else
2576480093f4SDimitry Andric       Opcode = Subtarget->useNaClTrap() ? ARM::TRAPNaCl : ARM::TRAP;
2577bdd1243dSDimitry Andric     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode));
25780b57cec5SDimitry Andric     return true;
25790b57cec5SDimitry Andric   }
25800b57cec5SDimitry Andric   }
25810b57cec5SDimitry Andric }
25820b57cec5SDimitry Andric 
25830b57cec5SDimitry Andric bool ARMFastISel::SelectTrunc(const Instruction *I) {
25840b57cec5SDimitry Andric   // The high bits for a type smaller than the register size are assumed to be
25850b57cec5SDimitry Andric   // undefined.
25860b57cec5SDimitry Andric   Value *Op = I->getOperand(0);
25870b57cec5SDimitry Andric 
25880b57cec5SDimitry Andric   EVT SrcVT, DestVT;
25890b57cec5SDimitry Andric   SrcVT = TLI.getValueType(DL, Op->getType(), true);
25900b57cec5SDimitry Andric   DestVT = TLI.getValueType(DL, I->getType(), true);
25910b57cec5SDimitry Andric 
25920b57cec5SDimitry Andric   if (SrcVT != MVT::i32 && SrcVT != MVT::i16 && SrcVT != MVT::i8)
25930b57cec5SDimitry Andric     return false;
25940b57cec5SDimitry Andric   if (DestVT != MVT::i16 && DestVT != MVT::i8 && DestVT != MVT::i1)
25950b57cec5SDimitry Andric     return false;
25960b57cec5SDimitry Andric 
259704eeddc0SDimitry Andric   Register SrcReg = getRegForValue(Op);
25980b57cec5SDimitry Andric   if (!SrcReg) return false;
25990b57cec5SDimitry Andric 
26000b57cec5SDimitry Andric   // Because the high bits are undefined, a truncate doesn't generate
26010b57cec5SDimitry Andric   // any code.
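  // (Illustrative: "trunc i32 %x to i8" just reuses %x's register; consumers
  // that need defined high bits perform an explicit extension elsewhere.)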
26020b57cec5SDimitry Andric   updateValueMap(I, SrcReg);
26030b57cec5SDimitry Andric   return true;
26040b57cec5SDimitry Andric }
26050b57cec5SDimitry Andric 
26060b57cec5SDimitry Andric unsigned ARMFastISel::ARMEmitIntExt(MVT SrcVT, unsigned SrcReg, MVT DestVT,
26070b57cec5SDimitry Andric                                     bool isZExt) {
26080b57cec5SDimitry Andric   if (DestVT != MVT::i32 && DestVT != MVT::i16 && DestVT != MVT::i8)
26090b57cec5SDimitry Andric     return 0;
26100b57cec5SDimitry Andric   if (SrcVT != MVT::i16 && SrcVT != MVT::i8 && SrcVT != MVT::i1)
26110b57cec5SDimitry Andric     return 0;
26120b57cec5SDimitry Andric 
26130b57cec5SDimitry Andric   // Table of which combinations can be emitted as a single instruction,
26140b57cec5SDimitry Andric   // and which will require two.
26150b57cec5SDimitry Andric   static const uint8_t isSingleInstrTbl[3][2][2][2] = {
26160b57cec5SDimitry Andric     //            ARM                     Thumb
26170b57cec5SDimitry Andric     //           !hasV6Ops  hasV6Ops     !hasV6Ops  hasV6Ops
26180b57cec5SDimitry Andric     //    ext:     s  z      s  z          s  z      s  z
26190b57cec5SDimitry Andric     /*  1 */ { { { 0, 1 }, { 0, 1 } }, { { 0, 0 }, { 0, 1 } } },
26200b57cec5SDimitry Andric     /*  8 */ { { { 0, 1 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } },
26210b57cec5SDimitry Andric     /* 16 */ { { { 0, 0 }, { 1, 1 } }, { { 0, 0 }, { 1, 1 } } }
26220b57cec5SDimitry Andric   };
26230b57cec5SDimitry Andric 
26240b57cec5SDimitry Andric   // Target register constraints:
26250b57cec5SDimitry Andric   //  - For ARM, the target can never be PC.
26260b57cec5SDimitry Andric   //  - For 16-bit Thumb, targets are restricted to the lower 8 registers.
26270b57cec5SDimitry Andric   //  - For 32-bit Thumb, targets are restricted to non-SP and non-PC.
26280b57cec5SDimitry Andric   static const TargetRegisterClass *RCTbl[2][2] = {
26290b57cec5SDimitry Andric     // Instructions: Two                     Single
26300b57cec5SDimitry Andric     /* ARM      */ { &ARM::GPRnopcRegClass, &ARM::GPRnopcRegClass },
26310b57cec5SDimitry Andric     /* Thumb    */ { &ARM::tGPRRegClass,    &ARM::rGPRRegClass    }
26320b57cec5SDimitry Andric   };
26330b57cec5SDimitry Andric 
26340b57cec5SDimitry Andric   // Table governing the instruction(s) to be emitted.
26350b57cec5SDimitry Andric   static const struct InstructionTable {
26360b57cec5SDimitry Andric     uint32_t Opc   : 16;
26370b57cec5SDimitry Andric     uint32_t hasS  :  1; // Some instructions have an S bit, always set it to 0.
26380b57cec5SDimitry Andric     uint32_t Shift :  7; // For shift operand addressing mode, used by MOVsi.
26390b57cec5SDimitry Andric     uint32_t Imm   :  8; // All instructions have either a shift or a mask.
26400b57cec5SDimitry Andric   } IT[2][2][3][2] = {
26410b57cec5SDimitry Andric     { // Two instructions (first is left shift, second is in this table).
26420b57cec5SDimitry Andric       { // ARM                Opc           S  Shift             Imm
26430b57cec5SDimitry Andric         /*  1 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  31 },
26440b57cec5SDimitry Andric         /*  1 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  31 } },
26450b57cec5SDimitry Andric         /*  8 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  24 },
26460b57cec5SDimitry Andric         /*  8 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  24 } },
26470b57cec5SDimitry Andric         /* 16 bit sext */ { { ARM::MOVsi  , 1, ARM_AM::asr     ,  16 },
26480b57cec5SDimitry Andric         /* 16 bit zext */   { ARM::MOVsi  , 1, ARM_AM::lsr     ,  16 } }
26490b57cec5SDimitry Andric       },
26500b57cec5SDimitry Andric       { // Thumb              Opc           S  Shift             Imm
26510b57cec5SDimitry Andric         /*  1 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  31 },
26520b57cec5SDimitry Andric         /*  1 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  31 } },
26530b57cec5SDimitry Andric         /*  8 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  24 },
26540b57cec5SDimitry Andric         /*  8 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  24 } },
26550b57cec5SDimitry Andric         /* 16 bit sext */ { { ARM::tASRri , 0, ARM_AM::no_shift,  16 },
26560b57cec5SDimitry Andric         /* 16 bit zext */   { ARM::tLSRri , 0, ARM_AM::no_shift,  16 } }
26570b57cec5SDimitry Andric       }
26580b57cec5SDimitry Andric     },
26590b57cec5SDimitry Andric     { // Single instruction.
26600b57cec5SDimitry Andric       { // ARM                Opc           S  Shift             Imm
26610b57cec5SDimitry Andric         /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
26620b57cec5SDimitry Andric         /*  1 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift,   1 } },
26630b57cec5SDimitry Andric         /*  8 bit sext */ { { ARM::SXTB   , 0, ARM_AM::no_shift,   0 },
26640b57cec5SDimitry Andric         /*  8 bit zext */   { ARM::ANDri  , 1, ARM_AM::no_shift, 255 } },
26650b57cec5SDimitry Andric         /* 16 bit sext */ { { ARM::SXTH   , 0, ARM_AM::no_shift,   0 },
26660b57cec5SDimitry Andric         /* 16 bit zext */   { ARM::UXTH   , 0, ARM_AM::no_shift,   0 } }
26670b57cec5SDimitry Andric       },
26680b57cec5SDimitry Andric       { // Thumb              Opc           S  Shift             Imm
26690b57cec5SDimitry Andric         /*  1 bit sext */ { { ARM::KILL   , 0, ARM_AM::no_shift,   0 },
26700b57cec5SDimitry Andric         /*  1 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift,   1 } },
26710b57cec5SDimitry Andric         /*  8 bit sext */ { { ARM::t2SXTB , 0, ARM_AM::no_shift,   0 },
26720b57cec5SDimitry Andric         /*  8 bit zext */   { ARM::t2ANDri, 1, ARM_AM::no_shift, 255 } },
26730b57cec5SDimitry Andric         /* 16 bit sext */ { { ARM::t2SXTH , 0, ARM_AM::no_shift,   0 },
26740b57cec5SDimitry Andric         /* 16 bit zext */   { ARM::t2UXTH , 0, ARM_AM::no_shift,   0 } }
26750b57cec5SDimitry Andric       }
26760b57cec5SDimitry Andric     }
26770b57cec5SDimitry Andric   };
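  // (Worked example, illustrative: sext i8 -> i32 on a pre-v6 ARM target is
  // not a single instruction per the tables above, so two shifts are emitted:
  // "mov rTmp, rSrc, lsl #24" followed by "mov rDst, rTmp, asr #24".)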
26780b57cec5SDimitry Andric 
26790b57cec5SDimitry Andric   unsigned SrcBits = SrcVT.getSizeInBits();
26800b57cec5SDimitry Andric   unsigned DestBits = DestVT.getSizeInBits();
26810b57cec5SDimitry Andric   (void) DestBits;
26820b57cec5SDimitry Andric   assert((SrcBits < DestBits) && "can only extend to larger types");
26830b57cec5SDimitry Andric   assert((DestBits == 32 || DestBits == 16 || DestBits == 8) &&
26840b57cec5SDimitry Andric          "other sizes unimplemented");
26850b57cec5SDimitry Andric   assert((SrcBits == 16 || SrcBits == 8 || SrcBits == 1) &&
26860b57cec5SDimitry Andric          "other sizes unimplemented");
26870b57cec5SDimitry Andric 
26880b57cec5SDimitry Andric   bool hasV6Ops = Subtarget->hasV6Ops();
26890b57cec5SDimitry Andric   unsigned Bitness = SrcBits / 8;  // {1,8,16}=>{0,1,2}
26900b57cec5SDimitry Andric   assert((Bitness < 3) && "sanity-check table bounds");
26910b57cec5SDimitry Andric 
26920b57cec5SDimitry Andric   bool isSingleInstr = isSingleInstrTbl[Bitness][isThumb2][hasV6Ops][isZExt];
26930b57cec5SDimitry Andric   const TargetRegisterClass *RC = RCTbl[isThumb2][isSingleInstr];
26940b57cec5SDimitry Andric   const InstructionTable *ITP = &IT[isSingleInstr][isThumb2][Bitness][isZExt];
26950b57cec5SDimitry Andric   unsigned Opc = ITP->Opc;
26960b57cec5SDimitry Andric   assert(ARM::KILL != Opc && "Invalid table entry");
26970b57cec5SDimitry Andric   unsigned hasS = ITP->hasS;
26980b57cec5SDimitry Andric   ARM_AM::ShiftOpc Shift = (ARM_AM::ShiftOpc) ITP->Shift;
26990b57cec5SDimitry Andric   assert(((Shift == ARM_AM::no_shift) == (Opc != ARM::MOVsi)) &&
27000b57cec5SDimitry Andric          "only MOVsi has shift operand addressing mode");
27010b57cec5SDimitry Andric   unsigned Imm = ITP->Imm;
27020b57cec5SDimitry Andric 
27030b57cec5SDimitry Andric   // 16-bit Thumb instructions always set CPSR (unless they're in an IT block).
27040b57cec5SDimitry Andric   bool setsCPSR = &ARM::tGPRRegClass == RC;
27050b57cec5SDimitry Andric   unsigned LSLOpc = isThumb2 ? ARM::tLSLri : ARM::MOVsi;
27060b57cec5SDimitry Andric   unsigned ResultReg;
27070b57cec5SDimitry Andric   // MOVsi encodes shift and immediate in shift operand addressing mode.
27080b57cec5SDimitry Andric   // When the two-instruction sequence is emitted, the condition below holds
27090b57cec5SDimitry Andric   // for both instructions, since both of them are shifts.
27100b57cec5SDimitry Andric   bool ImmIsSO = (Shift != ARM_AM::no_shift);
27110b57cec5SDimitry Andric 
27120b57cec5SDimitry Andric   // Either one or two instructions are emitted.
27130b57cec5SDimitry Andric   // They're always of the form:
27140b57cec5SDimitry Andric   //   dst = in OP imm
27150b57cec5SDimitry Andric   // CPSR is set only by 16-bit Thumb instructions.
27160b57cec5SDimitry Andric   // Predicate, if any, is AL.
27170b57cec5SDimitry Andric   // S bit, if available, is always 0.
27180b57cec5SDimitry Andric   // When two are emitted, the first's result feeds the second's input;
27190b57cec5SDimitry Andric   // that intermediate value is then dead.
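  // As a rough illustration (a sketch, not lifted verbatim from the tables
  // above): a 1-bit sign-extend in ARM mode takes the two-instruction path,
  // first moving the bit into the sign position and then shifting it back
  // arithmetically, roughly:
  //   lsl rT, rS, #31      @ first instruction (LSLOpc)
  //   asr rD, rT, #31      @ second instruction; rT is dead afterwards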
27200b57cec5SDimitry Andric   unsigned NumInstrsEmitted = isSingleInstr ? 1 : 2;
27210b57cec5SDimitry Andric   for (unsigned Instr = 0; Instr != NumInstrsEmitted; ++Instr) {
27220b57cec5SDimitry Andric     ResultReg = createResultReg(RC);
27230b57cec5SDimitry Andric     bool isLsl = (0 == Instr) && !isSingleInstr;
27240b57cec5SDimitry Andric     unsigned Opcode = isLsl ? LSLOpc : Opc;
27250b57cec5SDimitry Andric     ARM_AM::ShiftOpc ShiftAM = isLsl ? ARM_AM::lsl : Shift;
27260b57cec5SDimitry Andric     unsigned ImmEnc = ImmIsSO ? ARM_AM::getSORegOpc(ShiftAM, Imm) : Imm;
27270b57cec5SDimitry Andric     bool isKill = 1 == Instr;
27280b57cec5SDimitry Andric     MachineInstrBuilder MIB = BuildMI(
2729bdd1243dSDimitry Andric         *FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opcode), ResultReg);
27300b57cec5SDimitry Andric     if (setsCPSR)
27310b57cec5SDimitry Andric       MIB.addReg(ARM::CPSR, RegState::Define);
27320b57cec5SDimitry Andric     SrcReg = constrainOperandRegClass(TII.get(Opcode), SrcReg, 1 + setsCPSR);
27330b57cec5SDimitry Andric     MIB.addReg(SrcReg, isKill * RegState::Kill)
27340b57cec5SDimitry Andric         .addImm(ImmEnc)
27350b57cec5SDimitry Andric         .add(predOps(ARMCC::AL));
27360b57cec5SDimitry Andric     if (hasS)
27370b57cec5SDimitry Andric       MIB.add(condCodeOp());
27380b57cec5SDimitry Andric     // Second instruction consumes the first's result.
27390b57cec5SDimitry Andric     SrcReg = ResultReg;
27400b57cec5SDimitry Andric   }
27410b57cec5SDimitry Andric 
27420b57cec5SDimitry Andric   return ResultReg;
27430b57cec5SDimitry Andric }
27440b57cec5SDimitry Andric 
27450b57cec5SDimitry Andric bool ARMFastISel::SelectIntExt(const Instruction *I) {
27460b57cec5SDimitry Andric   // On ARM, in general, integer casts don't involve legal types; this code
27470b57cec5SDimitry Andric   // handles promotable integers.
27480b57cec5SDimitry Andric   Type *DestTy = I->getType();
27490b57cec5SDimitry Andric   Value *Src = I->getOperand(0);
27500b57cec5SDimitry Andric   Type *SrcTy = Src->getType();
27510b57cec5SDimitry Andric 
27520b57cec5SDimitry Andric   bool isZExt = isa<ZExtInst>(I);
275304eeddc0SDimitry Andric   Register SrcReg = getRegForValue(Src);
27540b57cec5SDimitry Andric   if (!SrcReg) return false;
27550b57cec5SDimitry Andric 
27560b57cec5SDimitry Andric   EVT SrcEVT, DestEVT;
27570b57cec5SDimitry Andric   SrcEVT = TLI.getValueType(DL, SrcTy, true);
27580b57cec5SDimitry Andric   DestEVT = TLI.getValueType(DL, DestTy, true);
27590b57cec5SDimitry Andric   if (!SrcEVT.isSimple()) return false;
27600b57cec5SDimitry Andric   if (!DestEVT.isSimple()) return false;
27610b57cec5SDimitry Andric 
27620b57cec5SDimitry Andric   MVT SrcVT = SrcEVT.getSimpleVT();
27630b57cec5SDimitry Andric   MVT DestVT = DestEVT.getSimpleVT();
27640b57cec5SDimitry Andric   unsigned ResultReg = ARMEmitIntExt(SrcVT, SrcReg, DestVT, isZExt);
27650b57cec5SDimitry Andric   if (ResultReg == 0) return false;
27660b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
27670b57cec5SDimitry Andric   return true;
27680b57cec5SDimitry Andric }
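// For example, the zext/sext handling above is what fast-isel reaches for IR
// such as (illustrative):
//   %w = zext i8 %b to i32
// where i8 is not a legal ARM type and ARMEmitIntExt widens the value to i32.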
27690b57cec5SDimitry Andric 
27700b57cec5SDimitry Andric bool ARMFastISel::SelectShift(const Instruction *I,
27710b57cec5SDimitry Andric                               ARM_AM::ShiftOpc ShiftTy) {
27720b57cec5SDimitry Andric   // Thumb2 mode is handled by the target-independent selector
27730b57cec5SDimitry Andric   // or by SelectionDAG ISel.
27740b57cec5SDimitry Andric   if (isThumb2)
27750b57cec5SDimitry Andric     return false;
27760b57cec5SDimitry Andric 
27770b57cec5SDimitry Andric   // Only handle i32 for now.
27780b57cec5SDimitry Andric   EVT DestVT = TLI.getValueType(DL, I->getType(), true);
27790b57cec5SDimitry Andric   if (DestVT != MVT::i32)
27800b57cec5SDimitry Andric     return false;
27810b57cec5SDimitry Andric 
27820b57cec5SDimitry Andric   unsigned Opc = ARM::MOVsr;
27830b57cec5SDimitry Andric   unsigned ShiftImm;
27840b57cec5SDimitry Andric   Value *Src2Value = I->getOperand(1);
27850b57cec5SDimitry Andric   if (const ConstantInt *CI = dyn_cast<ConstantInt>(Src2Value)) {
27860b57cec5SDimitry Andric     ShiftImm = CI->getZExtValue();
27870b57cec5SDimitry Andric 
27880b57cec5SDimitry Andric     // Fall back to SelectionDAG isel if the shift amount
27890b57cec5SDimitry Andric     // is zero or greater than or equal to the width of the value type.
27900b57cec5SDimitry Andric     if (ShiftImm == 0 || ShiftImm >= 32)
27910b57cec5SDimitry Andric       return false;
27920b57cec5SDimitry Andric 
27930b57cec5SDimitry Andric     Opc = ARM::MOVsi;
27940b57cec5SDimitry Andric   }
27950b57cec5SDimitry Andric 
27960b57cec5SDimitry Andric   Value *Src1Value = I->getOperand(0);
279704eeddc0SDimitry Andric   Register Reg1 = getRegForValue(Src1Value);
27980b57cec5SDimitry Andric   if (Reg1 == 0) return false;
27990b57cec5SDimitry Andric 
28000b57cec5SDimitry Andric   unsigned Reg2 = 0;
28010b57cec5SDimitry Andric   if (Opc == ARM::MOVsr) {
28020b57cec5SDimitry Andric     Reg2 = getRegForValue(Src2Value);
28030b57cec5SDimitry Andric     if (Reg2 == 0) return false;
28040b57cec5SDimitry Andric   }
28050b57cec5SDimitry Andric 
280604eeddc0SDimitry Andric   Register ResultReg = createResultReg(&ARM::GPRnopcRegClass);
28070b57cec5SDimitry Andric   if (ResultReg == 0) return false;
28080b57cec5SDimitry Andric 
2809bdd1243dSDimitry Andric   MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
28100b57cec5SDimitry Andric                                     TII.get(Opc), ResultReg)
28110b57cec5SDimitry Andric                             .addReg(Reg1);
28120b57cec5SDimitry Andric 
28130b57cec5SDimitry Andric   if (Opc == ARM::MOVsi)
28140b57cec5SDimitry Andric     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, ShiftImm));
28150b57cec5SDimitry Andric   else if (Opc == ARM::MOVsr) {
28160b57cec5SDimitry Andric     MIB.addReg(Reg2);
28170b57cec5SDimitry Andric     MIB.addImm(ARM_AM::getSORegOpc(ShiftTy, 0));
28180b57cec5SDimitry Andric   }
28190b57cec5SDimitry Andric 
28200b57cec5SDimitry Andric   AddOptionalDefs(MIB);
28210b57cec5SDimitry Andric   updateValueMap(I, ResultReg);
28220b57cec5SDimitry Andric   return true;
28230b57cec5SDimitry Andric }
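// As a sketch of what SelectShift above emits (assumed assembly, ARM mode
// only since Thumb2 bails out early): a constant shift such as
//   %r = shl i32 %a, 3
// takes the MOVsi form, roughly "mov r1, r0, lsl #3", while a variable shift
// amount goes through MOVsr, roughly "mov r2, r0, lsl r1".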
28240b57cec5SDimitry Andric 
28250b57cec5SDimitry Andric // TODO: SoftFP support.
28260b57cec5SDimitry Andric bool ARMFastISel::fastSelectInstruction(const Instruction *I) {
28270b57cec5SDimitry Andric   switch (I->getOpcode()) {
28280b57cec5SDimitry Andric     case Instruction::Load:
28290b57cec5SDimitry Andric       return SelectLoad(I);
28300b57cec5SDimitry Andric     case Instruction::Store:
28310b57cec5SDimitry Andric       return SelectStore(I);
28320b57cec5SDimitry Andric     case Instruction::Br:
28330b57cec5SDimitry Andric       return SelectBranch(I);
28340b57cec5SDimitry Andric     case Instruction::IndirectBr:
28350b57cec5SDimitry Andric       return SelectIndirectBr(I);
28360b57cec5SDimitry Andric     case Instruction::ICmp:
28370b57cec5SDimitry Andric     case Instruction::FCmp:
28380b57cec5SDimitry Andric       return SelectCmp(I);
28390b57cec5SDimitry Andric     case Instruction::FPExt:
28400b57cec5SDimitry Andric       return SelectFPExt(I);
28410b57cec5SDimitry Andric     case Instruction::FPTrunc:
28420b57cec5SDimitry Andric       return SelectFPTrunc(I);
28430b57cec5SDimitry Andric     case Instruction::SIToFP:
28440b57cec5SDimitry Andric       return SelectIToFP(I, /*isSigned*/ true);
28450b57cec5SDimitry Andric     case Instruction::UIToFP:
28460b57cec5SDimitry Andric       return SelectIToFP(I, /*isSigned*/ false);
28470b57cec5SDimitry Andric     case Instruction::FPToSI:
28480b57cec5SDimitry Andric       return SelectFPToI(I, /*isSigned*/ true);
28490b57cec5SDimitry Andric     case Instruction::FPToUI:
28500b57cec5SDimitry Andric       return SelectFPToI(I, /*isSigned*/ false);
28510b57cec5SDimitry Andric     case Instruction::Add:
28520b57cec5SDimitry Andric       return SelectBinaryIntOp(I, ISD::ADD);
28530b57cec5SDimitry Andric     case Instruction::Or:
28540b57cec5SDimitry Andric       return SelectBinaryIntOp(I, ISD::OR);
28550b57cec5SDimitry Andric     case Instruction::Sub:
28560b57cec5SDimitry Andric       return SelectBinaryIntOp(I, ISD::SUB);
28570b57cec5SDimitry Andric     case Instruction::FAdd:
28580b57cec5SDimitry Andric       return SelectBinaryFPOp(I, ISD::FADD);
28590b57cec5SDimitry Andric     case Instruction::FSub:
28600b57cec5SDimitry Andric       return SelectBinaryFPOp(I, ISD::FSUB);
28610b57cec5SDimitry Andric     case Instruction::FMul:
28620b57cec5SDimitry Andric       return SelectBinaryFPOp(I, ISD::FMUL);
28630b57cec5SDimitry Andric     case Instruction::SDiv:
28640b57cec5SDimitry Andric       return SelectDiv(I, /*isSigned*/ true);
28650b57cec5SDimitry Andric     case Instruction::UDiv:
28660b57cec5SDimitry Andric       return SelectDiv(I, /*isSigned*/ false);
28670b57cec5SDimitry Andric     case Instruction::SRem:
28680b57cec5SDimitry Andric       return SelectRem(I, /*isSigned*/ true);
28690b57cec5SDimitry Andric     case Instruction::URem:
28700b57cec5SDimitry Andric       return SelectRem(I, /*isSigned*/ false);
28710b57cec5SDimitry Andric     case Instruction::Call:
28720b57cec5SDimitry Andric       if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
28730b57cec5SDimitry Andric         return SelectIntrinsicCall(*II);
28740b57cec5SDimitry Andric       return SelectCall(I);
28750b57cec5SDimitry Andric     case Instruction::Select:
28760b57cec5SDimitry Andric       return SelectSelect(I);
28770b57cec5SDimitry Andric     case Instruction::Ret:
28780b57cec5SDimitry Andric       return SelectRet(I);
28790b57cec5SDimitry Andric     case Instruction::Trunc:
28800b57cec5SDimitry Andric       return SelectTrunc(I);
28810b57cec5SDimitry Andric     case Instruction::ZExt:
28820b57cec5SDimitry Andric     case Instruction::SExt:
28830b57cec5SDimitry Andric       return SelectIntExt(I);
28840b57cec5SDimitry Andric     case Instruction::Shl:
28850b57cec5SDimitry Andric       return SelectShift(I, ARM_AM::lsl);
28860b57cec5SDimitry Andric     case Instruction::LShr:
28870b57cec5SDimitry Andric       return SelectShift(I, ARM_AM::lsr);
28880b57cec5SDimitry Andric     case Instruction::AShr:
28890b57cec5SDimitry Andric       return SelectShift(I, ARM_AM::asr);
28900b57cec5SDimitry Andric     default: break;
28910b57cec5SDimitry Andric   }
28920b57cec5SDimitry Andric   return false;
28930b57cec5SDimitry Andric }
28940b57cec5SDimitry Andric 
28950b57cec5SDimitry Andric // This table describes sign- and zero-extend instructions which can be
28960b57cec5SDimitry Andric // folded into a preceding load. All of these extends have an immediate
28970b57cec5SDimitry Andric // (sometimes a mask and sometimes a shift) that's applied after
28980b57cec5SDimitry Andric // extension.
28990b57cec5SDimitry Andric static const struct FoldableLoadExtendsStruct {
29000b57cec5SDimitry Andric   uint16_t Opc[2];  // ARM, Thumb.
29010b57cec5SDimitry Andric   uint8_t ExpectedImm;
29020b57cec5SDimitry Andric   uint8_t isZExt     : 1;
29030b57cec5SDimitry Andric   uint8_t ExpectedVT : 7;
29040b57cec5SDimitry Andric } FoldableLoadExtends[] = {
29050b57cec5SDimitry Andric   { { ARM::SXTH,  ARM::t2SXTH  },   0, 0, MVT::i16 },
29060b57cec5SDimitry Andric   { { ARM::UXTH,  ARM::t2UXTH  },   0, 1, MVT::i16 },
29070b57cec5SDimitry Andric   { { ARM::ANDri, ARM::t2ANDri }, 255, 1, MVT::i8  },
29080b57cec5SDimitry Andric   { { ARM::SXTB,  ARM::t2SXTB  },   0, 0, MVT::i8  },
29090b57cec5SDimitry Andric   { { ARM::UXTB,  ARM::t2UXTB  },   0, 1, MVT::i8  }
29100b57cec5SDimitry Andric };
29110b57cec5SDimitry Andric 
29120b57cec5SDimitry Andric /// The specified machine instr operand is a vreg, and that
29130b57cec5SDimitry Andric /// vreg is being provided by the specified load instruction.  If possible,
29140b57cec5SDimitry Andric /// try to fold the load as an operand to the instruction, returning true if
29150b57cec5SDimitry Andric /// successful.
29160b57cec5SDimitry Andric bool ARMFastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
29170b57cec5SDimitry Andric                                       const LoadInst *LI) {
29180b57cec5SDimitry Andric   // Verify we have a legal type before going any further.
29190b57cec5SDimitry Andric   MVT VT;
29200b57cec5SDimitry Andric   if (!isLoadTypeLegal(LI->getType(), VT))
29210b57cec5SDimitry Andric     return false;
29220b57cec5SDimitry Andric 
29230b57cec5SDimitry Andric   // Combine load followed by zero- or sign-extend.
29240b57cec5SDimitry Andric   // ldrb r1, [r0]       ldrb r1, [r0]
29250b57cec5SDimitry Andric   // uxtb r2, r1     =>
29260b57cec5SDimitry Andric   // mov  r3, r2         mov  r3, r1
29270b57cec5SDimitry Andric   if (MI->getNumOperands() < 3 || !MI->getOperand(2).isImm())
29280b57cec5SDimitry Andric     return false;
29290b57cec5SDimitry Andric   const uint64_t Imm = MI->getOperand(2).getImm();
29300b57cec5SDimitry Andric 
29310b57cec5SDimitry Andric   bool Found = false;
29320b57cec5SDimitry Andric   bool isZExt;
29330b57cec5SDimitry Andric   for (const FoldableLoadExtendsStruct &FLE : FoldableLoadExtends) {
29340b57cec5SDimitry Andric     if (FLE.Opc[isThumb2] == MI->getOpcode() &&
29350b57cec5SDimitry Andric         (uint64_t)FLE.ExpectedImm == Imm &&
29360b57cec5SDimitry Andric         MVT((MVT::SimpleValueType)FLE.ExpectedVT) == VT) {
29370b57cec5SDimitry Andric       Found = true;
29380b57cec5SDimitry Andric       isZExt = FLE.isZExt;
29390b57cec5SDimitry Andric     }
29400b57cec5SDimitry Andric   }
29410b57cec5SDimitry Andric   if (!Found) return false;
29420b57cec5SDimitry Andric 
29430b57cec5SDimitry Andric   // See if we can handle this address.
29440b57cec5SDimitry Andric   Address Addr;
29450b57cec5SDimitry Andric   if (!ARMComputeAddress(LI->getOperand(0), Addr)) return false;
29460b57cec5SDimitry Andric 
29478bcb0991SDimitry Andric   Register ResultReg = MI->getOperand(0).getReg();
294881ad6265SDimitry Andric   if (!ARMEmitLoad(VT, ResultReg, Addr, LI->getAlign(), isZExt, false))
29490b57cec5SDimitry Andric     return false;
29500b57cec5SDimitry Andric   MachineBasicBlock::iterator I(MI);
29510b57cec5SDimitry Andric   removeDeadCode(I, std::next(I));
29520b57cec5SDimitry Andric   return true;
29530b57cec5SDimitry Andric }
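// In effect, a successful fold above re-emits the extending load directly into
// the extend instruction's result register and then erases the now-redundant
// extend via removeDeadCode, so downstream users of that register see no
// difference.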
29540b57cec5SDimitry Andric 
29555ffd83dbSDimitry Andric unsigned ARMFastISel::ARMLowerPICELF(const GlobalValue *GV, MVT VT) {
2956*0fca6ea1SDimitry Andric   bool UseGOT_PREL = !GV->isDSOLocal();
29570b57cec5SDimitry Andric   LLVMContext *Context = &MF->getFunction().getContext();
29580b57cec5SDimitry Andric   unsigned ARMPCLabelIndex = AFI->createPICLabelUId();
29590b57cec5SDimitry Andric   unsigned PCAdj = Subtarget->isThumb() ? 4 : 8;
29600b57cec5SDimitry Andric   ARMConstantPoolValue *CPV = ARMConstantPoolConstant::Create(
29610b57cec5SDimitry Andric       GV, ARMPCLabelIndex, ARMCP::CPValue, PCAdj,
29620b57cec5SDimitry Andric       UseGOT_PREL ? ARMCP::GOT_PREL : ARMCP::no_modifier,
29630b57cec5SDimitry Andric       /*AddCurrentAddress=*/UseGOT_PREL);
29640b57cec5SDimitry Andric 
29655ffd83dbSDimitry Andric   Align ConstAlign =
29665f757f3fSDimitry Andric       MF->getDataLayout().getPrefTypeAlign(PointerType::get(*Context, 0));
29670b57cec5SDimitry Andric   unsigned Idx = MF->getConstantPool()->getConstantPoolIndex(CPV, ConstAlign);
29680b57cec5SDimitry Andric   MachineMemOperand *CPMMO =
29690b57cec5SDimitry Andric       MF->getMachineMemOperand(MachinePointerInfo::getConstantPool(*MF),
29705ffd83dbSDimitry Andric                                MachineMemOperand::MOLoad, 4, Align(4));
29710b57cec5SDimitry Andric 
29728bcb0991SDimitry Andric   Register TempReg = MF->getRegInfo().createVirtualRegister(&ARM::rGPRRegClass);
29730b57cec5SDimitry Andric   unsigned Opc = isThumb2 ? ARM::t2LDRpci : ARM::LDRcp;
29740b57cec5SDimitry Andric   MachineInstrBuilder MIB =
2975bdd1243dSDimitry Andric       BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), TempReg)
29760b57cec5SDimitry Andric           .addConstantPoolIndex(Idx)
29770b57cec5SDimitry Andric           .addMemOperand(CPMMO);
29780b57cec5SDimitry Andric   if (Opc == ARM::LDRcp)
29790b57cec5SDimitry Andric     MIB.addImm(0);
29800b57cec5SDimitry Andric   MIB.add(predOps(ARMCC::AL));
29810b57cec5SDimitry Andric 
29820b57cec5SDimitry Andric   // Fix the address by adding pc.
298304eeddc0SDimitry Andric   Register DestReg = createResultReg(TLI.getRegClassFor(VT));
29840b57cec5SDimitry Andric   Opc = Subtarget->isThumb() ? ARM::tPICADD : UseGOT_PREL ? ARM::PICLDR
29850b57cec5SDimitry Andric                                                           : ARM::PICADD;
29860b57cec5SDimitry Andric   DestReg = constrainOperandRegClass(TII.get(Opc), DestReg, 0);
2987bdd1243dSDimitry Andric   MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD, TII.get(Opc), DestReg)
29880b57cec5SDimitry Andric             .addReg(TempReg)
29890b57cec5SDimitry Andric             .addImm(ARMPCLabelIndex);
29900b57cec5SDimitry Andric 
29910b57cec5SDimitry Andric   if (!Subtarget->isThumb())
29920b57cec5SDimitry Andric     MIB.add(predOps(ARMCC::AL));
29930b57cec5SDimitry Andric 
29940b57cec5SDimitry Andric   if (UseGOT_PREL && Subtarget->isThumb()) {
299504eeddc0SDimitry Andric     Register NewDestReg = createResultReg(TLI.getRegClassFor(VT));
2996bdd1243dSDimitry Andric     MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
29970b57cec5SDimitry Andric                   TII.get(ARM::t2LDRi12), NewDestReg)
29980b57cec5SDimitry Andric               .addReg(DestReg)
29990b57cec5SDimitry Andric               .addImm(0);
30000b57cec5SDimitry Andric     DestReg = NewDestReg;
30010b57cec5SDimitry Andric     AddOptionalDefs(MIB);
30020b57cec5SDimitry Andric   }
30030b57cec5SDimitry Andric   return DestReg;
30040b57cec5SDimitry Andric }
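// Rough shape of the sequence ARMLowerPICELF emits for the Thumb2 + GOT_PREL
// case (assumed assembly for illustration; labels are made up):
//   ldr   rT, .LCPIn          @ t2LDRpci: pc-relative constant pool load
//   .LPCn:
//   add   rD, rT, pc          @ tPICADD with ARMPCLabelIndex fixes up pc
//   ldr   rD, [rD]            @ t2LDRi12 #0: load the address held in the GOT slot
// In ARM mode, PICADD (or PICLDR when UseGOT_PREL is set) covers the last
// two steps.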
30050b57cec5SDimitry Andric 
30060b57cec5SDimitry Andric bool ARMFastISel::fastLowerArguments() {
30070b57cec5SDimitry Andric   if (!FuncInfo.CanLowerReturn)
30080b57cec5SDimitry Andric     return false;
30090b57cec5SDimitry Andric 
30100b57cec5SDimitry Andric   const Function *F = FuncInfo.Fn;
30110b57cec5SDimitry Andric   if (F->isVarArg())
30120b57cec5SDimitry Andric     return false;
30130b57cec5SDimitry Andric 
30140b57cec5SDimitry Andric   CallingConv::ID CC = F->getCallingConv();
30150b57cec5SDimitry Andric   switch (CC) {
30160b57cec5SDimitry Andric   default:
30170b57cec5SDimitry Andric     return false;
30180b57cec5SDimitry Andric   case CallingConv::Fast:
30190b57cec5SDimitry Andric   case CallingConv::C:
30200b57cec5SDimitry Andric   case CallingConv::ARM_AAPCS_VFP:
30210b57cec5SDimitry Andric   case CallingConv::ARM_AAPCS:
30220b57cec5SDimitry Andric   case CallingConv::ARM_APCS:
30230b57cec5SDimitry Andric   case CallingConv::Swift:
3024fe6060f1SDimitry Andric   case CallingConv::SwiftTail:
30250b57cec5SDimitry Andric     break;
30260b57cec5SDimitry Andric   }
30270b57cec5SDimitry Andric 
30280b57cec5SDimitry Andric   // Only handle simple cases, i.e. up to 4 i8/i16/i32 scalar arguments,
30290b57cec5SDimitry Andric   // which are passed in r0 - r3.
30300b57cec5SDimitry Andric   for (const Argument &Arg : F->args()) {
30310b57cec5SDimitry Andric     if (Arg.getArgNo() >= 4)
30320b57cec5SDimitry Andric       return false;
30330b57cec5SDimitry Andric 
30340b57cec5SDimitry Andric     if (Arg.hasAttribute(Attribute::InReg) ||
30350b57cec5SDimitry Andric         Arg.hasAttribute(Attribute::StructRet) ||
30360b57cec5SDimitry Andric         Arg.hasAttribute(Attribute::SwiftSelf) ||
30370b57cec5SDimitry Andric         Arg.hasAttribute(Attribute::SwiftError) ||
30380b57cec5SDimitry Andric         Arg.hasAttribute(Attribute::ByVal))
30390b57cec5SDimitry Andric       return false;
30400b57cec5SDimitry Andric 
30410b57cec5SDimitry Andric     Type *ArgTy = Arg.getType();
30420b57cec5SDimitry Andric     if (ArgTy->isStructTy() || ArgTy->isArrayTy() || ArgTy->isVectorTy())
30430b57cec5SDimitry Andric       return false;
30440b57cec5SDimitry Andric 
30450b57cec5SDimitry Andric     EVT ArgVT = TLI.getValueType(DL, ArgTy);
30460b57cec5SDimitry Andric     if (!ArgVT.isSimple()) return false;
30470b57cec5SDimitry Andric     switch (ArgVT.getSimpleVT().SimpleTy) {
30480b57cec5SDimitry Andric     case MVT::i8:
30490b57cec5SDimitry Andric     case MVT::i16:
30500b57cec5SDimitry Andric     case MVT::i32:
30510b57cec5SDimitry Andric       break;
30520b57cec5SDimitry Andric     default:
30530b57cec5SDimitry Andric       return false;
30540b57cec5SDimitry Andric     }
30550b57cec5SDimitry Andric   }
30560b57cec5SDimitry Andric 
30570b57cec5SDimitry Andric   static const MCPhysReg GPRArgRegs[] = {
30580b57cec5SDimitry Andric     ARM::R0, ARM::R1, ARM::R2, ARM::R3
30590b57cec5SDimitry Andric   };
30600b57cec5SDimitry Andric 
30610b57cec5SDimitry Andric   const TargetRegisterClass *RC = &ARM::rGPRRegClass;
30620b57cec5SDimitry Andric   for (const Argument &Arg : F->args()) {
30630b57cec5SDimitry Andric     unsigned ArgNo = Arg.getArgNo();
30640b57cec5SDimitry Andric     unsigned SrcReg = GPRArgRegs[ArgNo];
306504eeddc0SDimitry Andric     Register DstReg = FuncInfo.MF->addLiveIn(SrcReg, RC);
30660b57cec5SDimitry Andric     // FIXME: Unfortunately it's necessary to emit a copy from the livein copy.
30670b57cec5SDimitry Andric     // Without this, EmitLiveInCopies may eliminate the livein if its only
30680b57cec5SDimitry Andric     // use is a bitcast (which isn't turned into an instruction).
306904eeddc0SDimitry Andric     Register ResultReg = createResultReg(RC);
3070bdd1243dSDimitry Andric     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, MIMD,
30710b57cec5SDimitry Andric             TII.get(TargetOpcode::COPY),
30720b57cec5SDimitry Andric             ResultReg).addReg(DstReg, getKillRegState(true));
30730b57cec5SDimitry Andric     updateValueMap(&Arg, ResultReg);
30740b57cec5SDimitry Andric   }
30750b57cec5SDimitry Andric 
30760b57cec5SDimitry Andric   return true;
30770b57cec5SDimitry Andric }
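// A function this fast path accepts looks roughly like (illustrative IR only):
//   define i32 @f(i32 %a, i8 %b, i16 %c) { ... }
// i.e. at most four scalar i8/i16/i32 arguments, arriving in r0-r3 and copied
// into fresh virtual registers above.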
30780b57cec5SDimitry Andric 
30790b57cec5SDimitry Andric namespace llvm {
30800b57cec5SDimitry Andric 
30810b57cec5SDimitry Andric   FastISel *ARM::createFastISel(FunctionLoweringInfo &funcInfo,
30820b57cec5SDimitry Andric                                 const TargetLibraryInfo *libInfo) {
30830b57cec5SDimitry Andric     if (funcInfo.MF->getSubtarget<ARMSubtarget>().useFastISel())
30840b57cec5SDimitry Andric       return new ARMFastISel(funcInfo, libInfo);
30850b57cec5SDimitry Andric 
30860b57cec5SDimitry Andric     return nullptr;
30870b57cec5SDimitry Andric   }
30880b57cec5SDimitry Andric 
30890b57cec5SDimitry Andric } // end namespace llvm
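// A minimal sketch of how a caller could use this factory (everything except
// ARM::createFastISel itself is assumed here):
//   if (FastISel *FIS = ARM::createFastISel(FuncInfo, LibInfo)) {
//     // try FIS->selectInstruction(I) for each IR instruction, falling back
//     // to SelectionDAG ISel whenever it returns false
//     delete FIS;
//   }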