//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

template <typename CallInstTy>
bool CallLowering::lowerCall(
    MachineIRBuilder &MIRBuilder, const CallInstTy &CI, unsigned ResReg,
    ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CI.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshall all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CI.getFunctionType()->getNumParams();
  for (auto &Arg : CI.arg_operands()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + 1, DL, CI);
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (Function *F = CI.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResReg, CI.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeSet::ReturnIndex, DL, CI);

  return lowerCall(MIRBuilder, Callee, OrigRet, OrigArgs);
}

template bool
CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const CallInst &CI,
                        unsigned ResReg, ArrayRef<unsigned> ArgRegs,
                        std::function<unsigned()> GetCalleeReg) const;

template bool
CallLowering::lowerCall(MachineIRBuilder &MIRBuilder, const InvokeInst &CI,
                        unsigned ResReg, ArrayRef<unsigned> ArgRegs,
                        std::function<unsigned()> GetCalleeReg) const;

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeSet &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
    unsigned FrameAlign;
    if (FuncInfo.getParamAlignment(OpIdx))
      FrameAlign = FuncInfo.getParamAlignment(OpIdx);
    else
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = *MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
      return false;
  }

  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    if (VA.isRegLoc())
      Handler.assignValueToReg(Args[i].Reg, VA.getLocReg(), VA);
    else if (VA.isMemLoc()) {
      unsigned Size = VA.getValVT() == MVT::iPTR
                          ? DL.getPointerSize()
                          : alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      unsigned StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(Args[i].Reg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt:
    assert(!VA.getLocVT().isVector() && "unexpected vector extend");
    // Otherwise, it's a nop.
    return ValReg;
  case CCValAssign::SExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}