//===--- ByteCodeEmitter.cpp - Instruction emitter for the VM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ByteCodeEmitter.h"
#include "Context.h"
#include "Floating.h"
#include "IntegralAP.h"
#include "Opcode.h"
#include "Program.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/Basic/Builtins.h"
#include <limits>
#include <type_traits>

using namespace clang;
using namespace clang::interp;

/// Unevaluated builtins don't get their arguments put on the stack
/// automatically. They instead operate on the AST of their Call
/// Expression.
/// Similar information is available via ASTContext::BuiltinInfo,
/// but that is not correct for our use cases.
static bool isUnevaluatedBuiltin(unsigned BuiltinID) {
  return BuiltinID == Builtin::BI__builtin_classify_type ||
         BuiltinID == Builtin::BI__builtin_os_log_format_buffer_size ||
         BuiltinID == Builtin::BI__builtin_constant_p;
}

Function *ByteCodeEmitter::compileFunc(const FunctionDecl *FuncDecl) {

  // Bail out on manually created functions that haven't been assigned
  // proper parameters yet.
  if (!FuncDecl->param_empty() && !FuncDecl->param_begin())
    return nullptr;

  bool IsLambdaStaticInvoker = false;
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl);
      MD && MD->isLambdaStaticInvoker()) {
    // For a lambda static invoker, we might have to pick a specialized
    // version if the lambda is generic. In that case, the picked function
    // will *NOT* be a static invoker anymore. However, it will still
    // be a non-static member function, thus (usually) requiring an
    // instance pointer. We suppress that later in this function.
    IsLambdaStaticInvoker = true;

    const CXXRecordDecl *ClosureClass = MD->getParent();
    assert(ClosureClass->captures_begin() == ClosureClass->captures_end());
    if (ClosureClass->isGenericLambda()) {
      const CXXMethodDecl *LambdaCallOp = ClosureClass->getLambdaCallOperator();
      assert(MD->isFunctionTemplateSpecialization() &&
             "A generic lambda's static-invoker function must be a "
             "template specialization");
      const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
      FunctionTemplateDecl *CallOpTemplate =
          LambdaCallOp->getDescribedFunctionTemplate();
      void *InsertPos = nullptr;
      const FunctionDecl *CorrespondingCallOpSpecialization =
          CallOpTemplate->findSpecialization(TAL->asArray(), InsertPos);
      assert(CorrespondingCallOpSpecialization);
      FuncDecl = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
    }
  }

  // Set up argument indices.
  unsigned ParamOffset = 0;
  SmallVector<PrimType, 8> ParamTypes;
  SmallVector<unsigned, 8> ParamOffsets;
  llvm::DenseMap<unsigned, Function::ParamDescriptor> ParamDescriptors;

  // If the return value is not a primitive, a pointer to the storage where
  // the value is initialized is passed as the first argument. See 'RVO'
  // elsewhere in the code.
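  // (For illustration: given 'constexpr SomeStruct f();' where SomeStruct has
  // no primitive representation, the frame layout becomes
  //   [RVO pointer] [this pointer, if any] [declared parameters...],
  // matching the order in which offsets are assigned below.)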
  QualType Ty = FuncDecl->getReturnType();
  bool HasRVO = false;
  if (!Ty->isVoidType() && !Ctx.classify(Ty)) {
    HasRVO = true;
    ParamTypes.push_back(PT_Ptr);
    ParamOffsets.push_back(ParamOffset);
    ParamOffset += align(primSize(PT_Ptr));
  }

  // If the function decl is a member decl, the next parameter is
  // the 'this' pointer. This parameter is pop()ed from the
  // InterpStack when calling the function.
  bool HasThisPointer = false;
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl)) {
    if (!IsLambdaStaticInvoker) {
      HasThisPointer = MD->isInstance();
      if (MD->isImplicitObjectMemberFunction()) {
        ParamTypes.push_back(PT_Ptr);
        ParamOffsets.push_back(ParamOffset);
        ParamOffset += align(primSize(PT_Ptr));
      }
    }

    // Set up the mapping from lambda captures to closure record fields.
    if (isLambdaCallOperator(MD)) {
      // The parent record needs to be complete; we need to know about all
      // the lambda captures.
      if (!MD->getParent()->isCompleteDefinition())
        return nullptr;

      const Record *R = P.getOrCreateRecord(MD->getParent());
      llvm::DenseMap<const ValueDecl *, FieldDecl *> LC;
      FieldDecl *LTC = nullptr;

      MD->getParent()->getCaptureFields(LC, LTC);

      for (auto Cap : LC) {
        // Static lambdas cannot have any captures. If this one does,
        // it has already been diagnosed and we can only ignore it.
        if (MD->isStatic())
          return nullptr;

        unsigned Offset = R->getField(Cap.second)->Offset;
        this->LambdaCaptures[Cap.first] = {
            Offset, Cap.second->getType()->isReferenceType()};
      }
      if (LTC) {
        QualType CaptureType = R->getField(LTC)->Decl->getType();
        this->LambdaThisCapture = {R->getField(LTC)->Offset,
                                   CaptureType->isReferenceType() ||
                                       CaptureType->isPointerType()};
      }
    }
  }

  // Assign descriptors to all parameters.
  // Composite objects are lowered to pointers.
  for (const ParmVarDecl *PD : FuncDecl->parameters()) {
    std::optional<PrimType> T = Ctx.classify(PD->getType());
    PrimType PT = T.value_or(PT_Ptr);
    Descriptor *Desc = P.createDescriptor(PD, PT);
    ParamDescriptors.insert({ParamOffset, {PT, Desc}});
    Params.insert({PD, {ParamOffset, T != std::nullopt}});
    ParamOffsets.push_back(ParamOffset);
    ParamOffset += align(primSize(PT));
    ParamTypes.push_back(PT);
  }

  // Create a handle over the emitted code.
  Function *Func = P.getFunction(FuncDecl);
  if (!Func) {
    bool IsUnevaluatedBuiltin = false;
    if (unsigned BI = FuncDecl->getBuiltinID())
      IsUnevaluatedBuiltin = isUnevaluatedBuiltin(BI);

    Func =
        P.createFunction(FuncDecl, ParamOffset, std::move(ParamTypes),
                         std::move(ParamDescriptors), std::move(ParamOffsets),
                         HasThisPointer, HasRVO, IsUnevaluatedBuiltin);
  }

  assert(Func);
  // For not-yet-defined functions, we only create a Function instance and
  // compile their body later.
  if (!FuncDecl->isDefined() ||
      (FuncDecl->willHaveBody() && !FuncDecl->hasBody())) {
    Func->setDefined(false);
    return Func;
  }

  Func->setDefined(true);

  // Lambda static invokers are a special case that we emit custom code for.
  bool IsEligibleForCompilation = false;
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FuncDecl))
    IsEligibleForCompilation = MD->isLambdaStaticInvoker();
  if (!IsEligibleForCompilation)
    IsEligibleForCompilation =
        FuncDecl->isConstexpr() || FuncDecl->hasAttr<MSConstexprAttr>();

  // Compile the function body.
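  // Note that an ineligible function, or one whose body fails to compile,
  // is still marked as fully compiled below; it simply never gets any code
  // attached to it.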
  if (!IsEligibleForCompilation || !visitFunc(FuncDecl)) {
    Func->setIsFullyCompiled(true);
    return Func;
  }

  // Create scopes from descriptors.
  llvm::SmallVector<Scope, 2> Scopes;
  for (auto &DS : Descriptors) {
    Scopes.emplace_back(std::move(DS));
  }

  // Set the function's code.
  Func->setCode(NextLocalOffset, std::move(Code), std::move(SrcMap),
                std::move(Scopes), FuncDecl->hasBody());
  Func->setIsFullyCompiled(true);
  return Func;
}

Scope::Local ByteCodeEmitter::createLocal(Descriptor *D) {
  NextLocalOffset += sizeof(Block);
  unsigned Location = NextLocalOffset;
  NextLocalOffset += align(D->getAllocSize());
  return {Location, D};
}

void ByteCodeEmitter::emitLabel(LabelTy Label) {
  const size_t Target = Code.size();
  LabelOffsets.insert({Label, Target});

  if (auto It = LabelRelocs.find(Label); It != LabelRelocs.end()) {
    for (unsigned Reloc : It->second) {
      using namespace llvm::support;

      // Rewrite the operand of all jumps to this label.
      void *Location = Code.data() + Reloc - align(sizeof(int32_t));
      assert(aligned(Location));
      const int32_t Offset = Target - static_cast<int64_t>(Reloc);
      endian::write<int32_t, llvm::endianness::native>(Location, Offset);
    }
    LabelRelocs.erase(It);
  }
}

int32_t ByteCodeEmitter::getOffset(LabelTy Label) {
  // Compute the PC offset which the jump is relative to.
  const int64_t Position =
      Code.size() + align(sizeof(Opcode)) + align(sizeof(int32_t));
  assert(aligned(Position));

  // If the target is known, compute the jump offset.
  if (auto It = LabelOffsets.find(Label); It != LabelOffsets.end())
    return It->second - Position;

  // Otherwise, record a relocation and return a dummy offset.
  LabelRelocs[Label].push_back(Position);
  return 0;
}

/// Helper to write bytecode and bail out if 32-bit offsets become invalid.
/// Pointers will be automatically marshalled as 32-bit IDs.
template <typename T>
static void emit(Program &P, std::vector<std::byte> &Code, const T &Val,
                 bool &Success) {
  size_t Size;

  if constexpr (std::is_pointer_v<T>)
    Size = sizeof(uint32_t);
  else
    Size = sizeof(T);

  if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
    Success = false;
    return;
  }

  // Access must be aligned!
  size_t ValPos = align(Code.size());
  Size = align(Size);
  assert(aligned(ValPos + Size));
  Code.resize(ValPos + Size);

  if constexpr (!std::is_pointer_v<T>) {
    new (Code.data() + ValPos) T(Val);
  } else {
    uint32_t ID = P.getOrCreateNativePointer(Val);
    new (Code.data() + ValPos) uint32_t(ID);
  }
}

/// Emits a serializable value. These potentially contain
/// heap-allocated memory and aren't trivially copyable.
template <typename T>
static void emitSerialized(std::vector<std::byte> &Code, const T &Val,
                           bool &Success) {
  size_t Size = Val.bytesToSerialize();

  if (Code.size() + Size > std::numeric_limits<unsigned>::max()) {
    Success = false;
    return;
  }

  // Access must be aligned!
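  // (As in emit() above, both the write position and the serialized size are
  // rounded up so the interpreter can read the value back with aligned
  // accesses.)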
  size_t ValPos = align(Code.size());
  Size = align(Size);
  assert(aligned(ValPos + Size));
  Code.resize(ValPos + Size);

  Val.serialize(Code.data() + ValPos);
}

template <>
void emit(Program &P, std::vector<std::byte> &Code, const Floating &Val,
          bool &Success) {
  emitSerialized(Code, Val, Success);
}

template <>
void emit(Program &P, std::vector<std::byte> &Code,
          const IntegralAP<false> &Val, bool &Success) {
  emitSerialized(Code, Val, Success);
}

template <>
void emit(Program &P, std::vector<std::byte> &Code, const IntegralAP<true> &Val,
          bool &Success) {
  emitSerialized(Code, Val, Success);
}

template <typename... Tys>
bool ByteCodeEmitter::emitOp(Opcode Op, const Tys &...Args,
                             const SourceInfo &SI) {
  bool Success = true;

  // The opcode is followed by arguments. The source info is
  // attached to the address after the opcode.
  emit(P, Code, Op, Success);
  if (SI)
    SrcMap.emplace_back(Code.size(), SI);

  (..., emit(P, Code, Args, Success));
  return Success;
}

bool ByteCodeEmitter::jumpTrue(const LabelTy &Label) {
  return emitJt(getOffset(Label), SourceInfo{});
}

bool ByteCodeEmitter::jumpFalse(const LabelTy &Label) {
  return emitJf(getOffset(Label), SourceInfo{});
}

bool ByteCodeEmitter::jump(const LabelTy &Label) {
  return emitJmp(getOffset(Label), SourceInfo{});
}

bool ByteCodeEmitter::fallthrough(const LabelTy &Label) {
  emitLabel(Label);
  return true;
}

//===----------------------------------------------------------------------===//
// Opcode emitters
//===----------------------------------------------------------------------===//

#define GET_LINK_IMPL
#include "Opcodes.inc"
#undef GET_LINK_IMPL
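// With GET_LINK_IMPL defined, the generated Opcodes.inc is expected to provide
// the out-of-line bodies of the per-opcode emit methods (e.g. emitJt, emitJf
// and emitJmp used above), each forwarding to the emitOp() template.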