//===-- lib/CodeGen/GlobalISel/CallLowering.cpp - Call lowering -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements some simple delegations needed for call lowering.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

bool CallLowering::lowerCall(
    MachineIRBuilder &MIRBuilder, ImmutableCallSite CS, unsigned ResReg,
    ArrayRef<unsigned> ArgRegs, std::function<unsigned()> GetCalleeReg) const {
  auto &DL = CS.getParent()->getParent()->getParent()->getDataLayout();

  // First step is to marshal all the function's parameters into the correct
  // physregs and memory locations. Gather the sequence of argument types that
  // we'll pass to the assigner function.
  SmallVector<ArgInfo, 8> OrigArgs;
  unsigned i = 0;
  unsigned NumFixedArgs = CS.getFunctionType()->getNumParams();
  for (auto &Arg : CS.args()) {
    ArgInfo OrigArg{ArgRegs[i], Arg->getType(), ISD::ArgFlagsTy{},
                    i < NumFixedArgs};
    setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, CS);
    // We don't currently support swifterror or swiftself args.
    if (OrigArg.Flags.isSwiftError() || OrigArg.Flags.isSwiftSelf())
      return false;
    OrigArgs.push_back(OrigArg);
    ++i;
  }

  // Call a known function through its global address; otherwise fall back to
  // an indirect call through a register holding the callee's address.
  MachineOperand Callee = MachineOperand::CreateImm(0);
  if (const Function *F = CS.getCalledFunction())
    Callee = MachineOperand::CreateGA(F, 0);
  else
    Callee = MachineOperand::CreateReg(GetCalleeReg(), false);

  ArgInfo OrigRet{ResReg, CS.getType(), ISD::ArgFlagsTy{}};
  if (!OrigRet.Ty->isVoidTy())
    setArgFlags(OrigRet, AttributeList::ReturnIndex, DL, CS);

  return lowerCall(MIRBuilder, CS.getCallingConv(), Callee, OrigRet, OrigArgs);
}

template <typename FuncInfoTy>
void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                               const DataLayout &DL,
                               const FuncInfoTy &FuncInfo) const {
  const AttributeList &Attrs = FuncInfo.getAttributes();
  if (Attrs.hasAttribute(OpIdx, Attribute::ZExt))
    Arg.Flags.setZExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::SExt))
    Arg.Flags.setSExt();
  if (Attrs.hasAttribute(OpIdx, Attribute::InReg))
    Arg.Flags.setInReg();
  if (Attrs.hasAttribute(OpIdx, Attribute::StructRet))
    Arg.Flags.setSRet();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftSelf))
    Arg.Flags.setSwiftSelf();
  if (Attrs.hasAttribute(OpIdx, Attribute::SwiftError))
    Arg.Flags.setSwiftError();
  if (Attrs.hasAttribute(OpIdx, Attribute::ByVal))
    Arg.Flags.setByVal();
  if (Attrs.hasAttribute(OpIdx, Attribute::InAlloca))
    Arg.Flags.setInAlloca();

  if (Arg.Flags.isByVal() || Arg.Flags.isInAlloca()) {
    Type *ElementTy = cast<PointerType>(Arg.Ty)->getElementType();
    Arg.Flags.setByValSize(DL.getTypeAllocSize(ElementTy));
    // For ByVal, alignment should be passed from FE. BE will guess if
    // this info is not there but there are cases it cannot get right.
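    // OpIdx is an attribute-list index (return value at
    // AttributeList::ReturnIndex, first argument at
    // AttributeList::FirstArgIndex); getParamAlignment expects a zero-based
    // argument number, so convert before querying.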
    unsigned FrameAlign =
        FuncInfo.getParamAlignment(OpIdx - AttributeList::FirstArgIndex);
    if (!FrameAlign)
      FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
    Arg.Flags.setByValAlign(FrameAlign);
  }
  if (Attrs.hasAttribute(OpIdx, Attribute::Nest))
    Arg.Flags.setNest();
  Arg.Flags.setOrigAlign(DL.getABITypeAlignment(Arg.Ty));
}

template void
CallLowering::setArgFlags<Function>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const Function &FuncInfo) const;

template void
CallLowering::setArgFlags<CallInst>(CallLowering::ArgInfo &Arg, unsigned OpIdx,
                                    const DataLayout &DL,
                                    const CallInst &FuncInfo) const;

bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                     ArrayRef<ArgInfo> Args,
                                     ValueHandler &Handler) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  const DataLayout &DL = F.getParent()->getDataLayout();

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());

  // First pass: let the target's calling-convention assigner compute a
  // location (register or stack slot) for every argument.
  unsigned NumArgs = Args.size();
  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT CurVT = MVT::getVT(Args[i].Ty);
    if (Handler.assignArg(i, CurVT, CurVT, CCValAssign::Full, Args[i], CCInfo))
      return false;
  }

  // Second pass: materialize each argument in its assigned location.
  for (unsigned i = 0, e = Args.size(), j = 0; i != e; ++i, ++j) {
    assert(j < ArgLocs.size() && "Skipped too many arg locs");

    CCValAssign &VA = ArgLocs[j];
    assert(VA.getValNo() == i && "Location doesn't correspond to current arg");

    if (VA.needsCustom()) {
      j += Handler.assignCustomValue(Args[i], makeArrayRef(ArgLocs).slice(j));
      continue;
    }

    if (VA.isRegLoc())
      Handler.assignValueToReg(Args[i].Reg, VA.getLocReg(), VA);
    else if (VA.isMemLoc()) {
      unsigned Size = VA.getValVT() == MVT::iPTR
                          ? DL.getPointerSize()
                          : alignTo(VA.getValVT().getSizeInBits(), 8) / 8;
      unsigned Offset = VA.getLocMemOffset();
      MachinePointerInfo MPO;
      unsigned StackAddr = Handler.getStackAddress(Size, Offset, MPO);
      Handler.assignValueToAddress(Args[i].Reg, StackAddr, Size, MPO, VA);
    } else {
      // FIXME: Support byvals and other weirdness
      return false;
    }
  }
  return true;
}

unsigned CallLowering::ValueHandler::extendRegister(unsigned ValReg,
                                                    CCValAssign &VA) {
  LLT LocTy{VA.getLocVT()};
  switch (VA.getLocInfo()) {
  default: break;
  case CCValAssign::Full:
  case CCValAssign::BCvt:
    // FIXME: bitconverting between vector types may or may not be a
    // nop in big-endian situations.
    return ValReg;
  case CCValAssign::AExt: {
    auto MIB = MIRBuilder.buildAnyExt(LocTy, ValReg);
    return MIB->getOperand(0).getReg();
  }
  case CCValAssign::SExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildSExt(NewReg, ValReg);
    return NewReg;
  }
  case CCValAssign::ZExt: {
    unsigned NewReg = MRI.createGenericVirtualRegister(LocTy);
    MIRBuilder.buildZExt(NewReg, ValReg);
    return NewReg;
  }
  }
  llvm_unreachable("unable to extend register");
}