//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
    : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
      TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C) {
  // No stack is used.
  StackOffset = 0;

  clearByValRegsInfo();
  UsedRegs.resize((TRI.getNumRegs() + 31) / 32);
}

/// Allocate space on the stack large enough to pass an argument by value.
/// The size and alignment information of the argument is encoded in
/// its parameter attribute.
void CCState::HandleByVal(unsigned ValNo, MVT ValVT, MVT LocVT,
                          CCValAssign::LocInfo LocInfo, int MinSize,
                          Align MinAlign, ISD::ArgFlagsTy ArgFlags) {
  Align Alignment = ArgFlags.getNonZeroByValAlign();
  unsigned Size = ArgFlags.getByValSize();
  if (MinSize > (int)Size)
    Size = MinSize;
  if (MinAlign > Alignment)
    Alignment = MinAlign;
  ensureMaxAlignment(Alignment);
  MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Alignment);
  Size = unsigned(alignTo(Size, MinAlign));
  unsigned Offset = AllocateStack(Size, Alignment);
  addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
}

/// Mark a register and all of its aliases as allocated.
void CCState::MarkAllocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] |= 1 << (*AI & 31);
}

void CCState::MarkUnallocated(MCPhysReg Reg) {
  for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
    UsedRegs[*AI / 32] &= ~(1 << (*AI & 31));
}

bool CCState::IsShadowAllocatedReg(MCRegister Reg) const {
  if (!isAllocated(Reg))
    return false;

  for (auto const &ValAssign : Locs)
    if (ValAssign.isRegLoc() && TRI.regsOverlap(ValAssign.getLocReg(), Reg))
      return false;
  return true;
}
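
// The helpers above (and the Analyze* entry points below) are driven by a
// target-supplied CCAssignFn, typically generated from TableGen. A minimal,
// hand-written sketch of such a function is shown here purely for
// illustration; the name CC_Example and its register list are hypothetical
// and not part of LLVM:
//
//   static bool CC_Example(unsigned ValNo, MVT ValVT, MVT LocVT,
//                          CCValAssign::LocInfo LocInfo,
//                          ISD::ArgFlagsTy ArgFlags, CCState &State) {
//     if (LocVT == MVT::i32) {
//       // Try the next free argument register first.
//       static const MCPhysReg RegList[] = {/* target argument registers */};
//       if (MCRegister Reg = State.AllocateReg(RegList)) {
//         State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
//         return false; // Assigned.
//       }
//     }
//     // Otherwise fall back to a 4-byte, 4-aligned stack slot.
//     unsigned Offset = State.AllocateStack(4, Align(4));
//     State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
//     return false; // Assigned; returning true means "unable to handle".
//   }
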
/// Analyze an array of argument values,
/// incorporating info about the formals into this state.
void
CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  unsigned NumArgs = Ins.size();

  for (unsigned i = 0; i != NumArgs; ++i) {
    MVT ArgVT = Ins[i].VT;
    ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function argument #" + Twine(i));
  }
}

/// Analyze the return values of a function, returning true if the return can
/// be performed without sret-demotion and false otherwise.
bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                          CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      return false;
  }
  return true;
}

/// Analyze the returned values of a return,
/// incorporating info about the result values into this state.
void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
                            CCAssignFn Fn) {
  // Determine which register each value should be copied into.
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
      report_fatal_error("unable to allocate function return #" + Twine(i));
  }
}

/// Analyze the outgoing arguments to a call,
/// incorporating info about the passed values into this state.
void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                                  CCAssignFn Fn) {
  unsigned NumOps = Outs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it takes vectors of types and argument flags.
void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
                                  SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
                                  CCAssignFn Fn) {
  unsigned NumOps = ArgVTs.size();
  for (unsigned i = 0; i != NumOps; ++i) {
    MVT ArgVT = ArgVTs[i];
    ISD::ArgFlagsTy ArgFlags = Flags[i];
    if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call operand #" << i << " has unhandled type "
             << EVT(ArgVT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Analyze the return values of a call, incorporating info about the returned
/// values into this state.
void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn Fn) {
  for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
    MVT VT = Ins[i].VT;
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call result #" << i << " has unhandled type "
             << EVT(VT).getEVTString() << '\n';
#endif
      llvm_unreachable(nullptr);
    }
  }
}

/// Same as above except it's specialized for calls that produce a single
/// value.
void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
  if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
#ifndef NDEBUG
    dbgs() << "Call result has unhandled type " << EVT(VT).getEVTString()
           << '\n';
#endif
    llvm_unreachable(nullptr);
  }
}
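
// For reference, a typical (simplified and purely illustrative) use of the
// analysis entry points above from a target's LowerFormalArguments;
// CC_MyTarget stands for a hypothetical CCAssignFn:
//
//   SmallVector<CCValAssign, 16> ArgLocs;
//   CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
//   CCInfo.AnalyzeFormalArguments(Ins, CC_MyTarget);
//   for (CCValAssign &VA : ArgLocs) {
//     if (VA.isRegLoc()) {
//       // Copy the incoming value out of VA.getLocReg().
//     } else {
//       // Create a fixed frame object at VA.getLocMemOffset() and load it.
//     }
//   }
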
void CCState::ensureMaxAlignment(Align Alignment) {
  if (!AnalyzingMustTailForwardedRegs)
    MF.getFrameInfo().ensureMaxAlignment(Alignment);
}

static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
  if (VT.isVector())
    return true; // Assume -msse-regparm might be in effect.
  if (!VT.isInteger())
    return false;
  return (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall);
}

void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
                                          MVT VT, CCAssignFn Fn) {
  unsigned SavedStackOffset = StackOffset;
  Align SavedMaxStackArgAlign = MaxStackArgAlign;
  unsigned NumLocs = Locs.size();

  // Set the 'inreg' flag if it is used for this calling convention.
  ISD::ArgFlagsTy Flags;
  if (isValueTypeInRegForCC(CallingConv, VT))
    Flags.setInReg();

  // Allocate something of this value type repeatedly until we get assigned a
  // location in memory.
  bool HaveRegParm;
  do {
    if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
#ifndef NDEBUG
      dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
             << " while computing remaining regparms\n";
#endif
      llvm_unreachable(nullptr);
    }
    HaveRegParm = Locs.back().isRegLoc();
  } while (HaveRegParm);

  // Copy all the registers from the value locations we added.
  assert(NumLocs < Locs.size() && "CC assignment failed to add location");
  for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
    if (Locs[I].isRegLoc())
      Regs.push_back(MCPhysReg(Locs[I].getLocReg()));

  // Clear the assigned values and stack memory. We leave the registers marked
  // as allocated so that future queries don't return the same registers, e.g.
  // when i64 and f64 are both passed in GPRs.
  StackOffset = SavedStackOffset;
  MaxStackArgAlign = SavedMaxStackArgAlign;
  Locs.resize(NumLocs);
}
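
// As a rough illustration of getRemainingRegParmsForType (hypothetical state,
// fastcall-style convention where the first two i32 arguments go in ECX and
// EDX): if ECX has already been allocated to an earlier argument, then
//
//   SmallVector<MCPhysReg, 8> Regs;
//   State.getRemainingRegParmsForType(Regs, MVT::i32, CC_X86_32_FastCall);
//
// would leave Regs == {X86::EDX}. The stack offset, maximum stack alignment,
// and location list are restored afterwards, but EDX remains marked as
// allocated.
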
void CCState::analyzeMustTailForwardedRegisters(
    SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
    CCAssignFn Fn) {
  // Oftentimes calling conventions will not use register parameters for
  // variadic functions, so we need to assume we're not variadic so that we get
  // all the registers that might be used in a non-variadic call.
  SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
  SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);

  for (MVT RegVT : RegParmTypes) {
    SmallVector<MCPhysReg, 8> RemainingRegs;
    getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
    const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
    const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
    for (MCPhysReg PReg : RemainingRegs) {
      Register VReg = MF.addLiveIn(PReg, RC);
      Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
    }
  }
}

bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
                                CallingConv::ID CallerCC, MachineFunction &MF,
                                LLVMContext &C,
                                const SmallVectorImpl<ISD::InputArg> &Ins,
                                CCAssignFn CalleeFn, CCAssignFn CallerFn) {
  if (CalleeCC == CallerCC)
    return true;
  SmallVector<CCValAssign, 4> RVLocs1;
  CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
  CCInfo1.AnalyzeCallResult(Ins, CalleeFn);

  SmallVector<CCValAssign, 4> RVLocs2;
  CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
  CCInfo2.AnalyzeCallResult(Ins, CallerFn);

  if (RVLocs1.size() != RVLocs2.size())
    return false;
  for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
    const CCValAssign &Loc1 = RVLocs1[I];
    const CCValAssign &Loc2 = RVLocs2[I];

    if ( // Must both be in registers, or both in memory
        Loc1.isRegLoc() != Loc2.isRegLoc() ||
        // Must fill the same part of their locations
        Loc1.getLocInfo() != Loc2.getLocInfo() ||
        // Memory offset/register number must be the same
        Loc1.getExtraInfo() != Loc2.getExtraInfo())
      return false;
  }
  return true;
}
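
// A typical (illustrative) caller of resultsCompatible is a target's
// tail-call eligibility check, where RetCC_MyTarget stands for a hypothetical
// return-value CCAssignFn:
//
//   if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, Ctx, Ins,
//                                   RetCC_MyTarget, RetCC_MyTarget))
//     return false; // The callee would return its results in different
//                   // locations than the caller expects, so don't tail call.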