//===-- SPIRVPrepareFunctions.cpp - modify function signatures --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass modifies function signatures containing aggregate arguments
// and/or return values before IRTranslator. Information about the original
// signatures is stored in metadata. It is used during call lowering to
// restore correct SPIR-V types of function arguments and return values.
// This pass also substitutes some llvm intrinsic calls with calls to newly
// generated functions (as the Khronos LLVM/SPIR-V Translator does).
//
// NOTE: this pass is a module-level one due to the necessity to modify
// GVs/functions.
//
//===----------------------------------------------------------------------===//

#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LowerMemIntrinsics.h"
#include <regex>

using namespace llvm;

namespace llvm {
void initializeSPIRVPrepareFunctionsPass(PassRegistry &);
}

namespace {

class SPIRVPrepareFunctions : public ModulePass {
  const SPIRVTargetMachine &TM;
  bool substituteIntrinsicCalls(Function *F);
  Function *removeAggregateTypesFromSignature(Function *F);

public:
  static char ID;
  SPIRVPrepareFunctions(const SPIRVTargetMachine &TM) : ModulePass(ID), TM(TM) {
    initializeSPIRVPrepareFunctionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  StringRef getPassName() const override { return "SPIRV prepare functions"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};

} // namespace

char SPIRVPrepareFunctions::ID = 0;

INITIALIZE_PASS(SPIRVPrepareFunctions, "prepare-functions",
                "SPIRV prepare functions", false, false)

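// Maps an LLVM intrinsic callee name to the name of the wrapper function
// generated for it: dots become underscores and a "spirv." prefix is added.
// For illustration, "llvm.memset.p0.i64" becomes "spirv.llvm_memset_p0_i64".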
std::string lowerLLVMIntrinsicName(IntrinsicInst *II) {
  Function *IntrinsicFunc = II->getCalledFunction();
  assert(IntrinsicFunc && "Missing function");
  std::string FuncName = IntrinsicFunc->getName().str();
  std::replace(FuncName.begin(), FuncName.end(), '.', '_');
  FuncName = "spirv." + FuncName;
  return FuncName;
}

static Function *getOrCreateFunction(Module *M, Type *RetTy,
                                     ArrayRef<Type *> ArgTypes,
                                     StringRef Name) {
  FunctionType *FT = FunctionType::get(RetTy, ArgTypes, false);
  Function *F = M->getFunction(Name);
  if (F && F->getFunctionType() == FT)
    return F;
  Function *NewF = Function::Create(FT, GlobalValue::ExternalLinkage, Name, M);
  if (F)
    NewF->setDSOLocal(F->isDSOLocal());
  NewF->setCallingConv(CallingConv::SPIR_FUNC);
  return NewF;
}

static bool lowerIntrinsicToFunction(IntrinsicInst *Intrinsic) {
  // Cases of the @llvm.memset.* intrinsic with constant value and length
  // arguments are emulated via "storing" a constant array to the destination.
  // For other cases we wrap the intrinsic in a @spirv.llvm_memset_* function
  // and expand the intrinsic to a loop via expandMemSetAsLoop().
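  // A rough sketch of the wrapper generated below (exact parameter types
  // follow the original intrinsic's signature):
  //   define void @spirv.llvm_memset_p0_i32(ptr %dest, i8 %val, i32 %len,
  //                                         i1 %isvolatile) {
  //   entry:
  //     ; the inner llvm.memset call created below is expanded into a store
  //     ; loop by expandMemSetAsLoop() and then erased
  //     ret void
  //   }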
  if (auto *MSI = dyn_cast<MemSetInst>(Intrinsic))
    if (isa<Constant>(MSI->getValue()) && isa<ConstantInt>(MSI->getLength()))
      return false; // It is handled later using OpCopyMemorySized.

  Module *M = Intrinsic->getModule();
  std::string FuncName = lowerLLVMIntrinsicName(Intrinsic);
  if (Intrinsic->isVolatile())
    FuncName += ".volatile";
  // Redirect @llvm.intrinsic.* call to @spirv.llvm_intrinsic_*
  Function *F = M->getFunction(FuncName);
  if (F) {
    Intrinsic->setCalledFunction(F);
    return true;
  }
  // TODO: copy argument attributes (nocapture, writeonly).
  FunctionCallee FC =
      M->getOrInsertFunction(FuncName, Intrinsic->getFunctionType());
  auto IntrinsicID = Intrinsic->getIntrinsicID();
  Intrinsic->setCalledFunction(FC);

  F = dyn_cast<Function>(FC.getCallee());
  assert(F && "Callee must be a function");

  switch (IntrinsicID) {
  case Intrinsic::memset: {
    auto *MSI = static_cast<MemSetInst *>(Intrinsic);
    Argument *Dest = F->getArg(0);
    Argument *Val = F->getArg(1);
    Argument *Len = F->getArg(2);
    Argument *IsVolatile = F->getArg(3);
    Dest->setName("dest");
    Val->setName("val");
    Len->setName("len");
    IsVolatile->setName("isvolatile");
    BasicBlock *EntryBB = BasicBlock::Create(M->getContext(), "entry", F);
    IRBuilder<> IRB(EntryBB);
    auto *MemSet = IRB.CreateMemSet(Dest, Val, Len, MSI->getDestAlign(),
                                    MSI->isVolatile());
    IRB.CreateRetVoid();
    expandMemSetAsLoop(cast<MemSetInst>(MemSet));
    MemSet->eraseFromParent();
    break;
  }
  case Intrinsic::bswap: {
    BasicBlock *EntryBB = BasicBlock::Create(M->getContext(), "entry", F);
    IRBuilder<> IRB(EntryBB);
    auto *BSwap = IRB.CreateIntrinsic(Intrinsic::bswap, Intrinsic->getType(),
                                      F->getArg(0));
    IRB.CreateRet(BSwap);
    IntrinsicLowering IL(M->getDataLayout());
    IL.LowerIntrinsicCall(BSwap);
    break;
  }
  default:
    break;
  }
  return true;
}

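// Extracts the annotation string from an @llvm.ptr.annotation argument and,
// if the optional argument refers to a constant struct of integers (as the
// Khronos Translator emits), appends them. For illustration, an annotation
// string "foo" combined with a struct { i32 1, i32 2 } yields "foo: 1, 2".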
static std::string getAnnotation(Value *AnnoVal, Value *OptAnnoVal) {
  if (auto *Ref = dyn_cast_or_null<GetElementPtrInst>(AnnoVal))
    AnnoVal = Ref->getOperand(0);
  if (auto *Ref = dyn_cast_or_null<BitCastInst>(OptAnnoVal))
    OptAnnoVal = Ref->getOperand(0);

  std::string Anno;
  if (auto *C = dyn_cast_or_null<Constant>(AnnoVal)) {
    StringRef Str;
    if (getConstantStringInfo(C, Str))
      Anno = Str;
  }
  // Handle the optional annotation parameter the way the Khronos Translator
  // does (collect integers wrapped in a struct).
  if (auto *C = dyn_cast_or_null<Constant>(OptAnnoVal);
      C && C->getNumOperands()) {
    Value *MaybeStruct = C->getOperand(0);
    if (auto *Struct = dyn_cast<ConstantStruct>(MaybeStruct)) {
      for (unsigned I = 0, E = Struct->getNumOperands(); I != E; ++I) {
        if (auto *CInt = dyn_cast<ConstantInt>(Struct->getOperand(I)))
          Anno += (I == 0 ? ": " : ", ") +
                  std::to_string(CInt->getType()->getIntegerBitWidth() == 1
                                     ? CInt->getZExtValue()
                                     : CInt->getSExtValue());
      }
    } else if (auto *Struct = dyn_cast<ConstantAggregateZero>(MaybeStruct)) {
      // { i32 i32 ... } zeroinitializer
      for (unsigned I = 0, E = Struct->getType()->getStructNumElements();
           I != E; ++I)
        Anno += I == 0 ? ": 0" : ", 0";
    }
  }
  return Anno;
}

static SmallVector<Metadata *> parseAnnotation(Value *I,
                                               const std::string &Anno,
                                               LLVMContext &Ctx,
                                               Type *Int32Ty) {
  // Try to parse the annotation string according to the following rules:
  // annotation := ({kind} | {kind:value,value,...})+
  // kind := number
  // value := number | string
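  // For illustration, "{44:32}" produces a single MDNode with operands
  // (i32 44, i32 32), whereas a string that does not match the grammar yields
  // an empty result and is emitted as a UserSemantic decoration by the caller.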
  static const std::regex R(
      "\\{(\\d+)(?:[:,](\\d+|\"[^\"]*\")(?:,(\\d+|\"[^\"]*\"))*)?\\}");
  SmallVector<Metadata *> MDs;
  int Pos = 0;
  for (std::sregex_iterator
           It = std::sregex_iterator(Anno.begin(), Anno.end(), R),
           ItEnd = std::sregex_iterator();
       It != ItEnd; ++It) {
    if (It->position() != Pos)
      return SmallVector<Metadata *>{};
    Pos = It->position() + It->length();
    std::smatch Match = *It;
    SmallVector<Metadata *> MDsItem;
    for (std::size_t i = 1; i < Match.size(); ++i) {
      std::ssub_match SMatch = Match[i];
      std::string Item = SMatch.str();
      if (Item.length() == 0)
        break;
      if (Item[0] == '"') {
        Item = Item.substr(1, Item.length() - 2);
        // The acceptable format of the string snippet is a comma-separated
        // list of decimal integers, e.g. "1,2,3":
        static const std::regex RStr("^(\\d+)(?:,(\\d+))*$");
        if (std::smatch MatchStr; std::regex_match(Item, MatchStr, RStr)) {
          for (std::size_t SubIdx = 1; SubIdx < MatchStr.size(); ++SubIdx)
            if (std::string SubStr = MatchStr[SubIdx].str(); SubStr.length())
              MDsItem.push_back(ConstantAsMetadata::get(
                  ConstantInt::get(Int32Ty, std::stoi(SubStr))));
        } else {
          MDsItem.push_back(MDString::get(Ctx, Item));
        }
      } else if (int32_t Num; llvm::to_integer(StringRef(Item), Num, 10)) {
        MDsItem.push_back(
            ConstantAsMetadata::get(ConstantInt::get(Int32Ty, Num)));
      } else {
        MDsItem.push_back(MDString::get(Ctx, Item));
      }
    }
    if (MDsItem.size() == 0)
      return SmallVector<Metadata *>{};
    MDs.push_back(MDNode::get(Ctx, MDsItem));
  }
  return Pos == static_cast<int>(Anno.length()) ? MDs
                                                : SmallVector<Metadata *>{};
}

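// Lowers @llvm.ptr.annotation. Roughly (names and types are illustrative), a
// call like
//   %a = call ptr @llvm.ptr.annotation.p0(ptr %x, ptr @.str, ptr @.str.1,
//                                         i32 7, ptr null)
// gets a companion
//   call void @llvm.spv.assign.decoration.p0(ptr %x, metadata !N)
// where !N holds the parsed decorations, and uses of %a are redirected to the
// annotated pointer operand.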
static void lowerPtrAnnotation(IntrinsicInst *II) {
  LLVMContext &Ctx = II->getContext();
  Type *Int32Ty = Type::getInt32Ty(Ctx);

  // Retrieve an annotation string from arguments.
  Value *PtrArg = nullptr;
  if (auto *BI = dyn_cast<BitCastInst>(II->getArgOperand(0)))
    PtrArg = BI->getOperand(0);
  else
    PtrArg = II->getOperand(0);
  std::string Anno =
      getAnnotation(II->getArgOperand(1),
                    4 < II->arg_size() ? II->getArgOperand(4) : nullptr);

  // Parse the annotation.
  SmallVector<Metadata *> MDs = parseAnnotation(II, Anno, Ctx, Int32Ty);

  // If the annotation string is not parsed successfully we don't know the
  // format used and output it as a general UserSemantic decoration.
  // Otherwise MDs is a Metadata tuple (a decoration list) in the format
  // expected by `spirv.Decorations`.
  if (MDs.size() == 0) {
    auto UserSemantic = ConstantAsMetadata::get(ConstantInt::get(
        Int32Ty, static_cast<uint32_t>(SPIRV::Decoration::UserSemantic)));
    MDs.push_back(MDNode::get(Ctx, {UserSemantic, MDString::get(Ctx, Anno)}));
  }

  // Build the internal intrinsic function.
  IRBuilder<> IRB(II->getParent());
  IRB.SetInsertPoint(II);
  IRB.CreateIntrinsic(
      Intrinsic::spv_assign_decoration, {PtrArg->getType()},
      {PtrArg, MetadataAsValue::get(Ctx, MDNode::get(Ctx, MDs))});
  II->replaceAllUsesWith(II->getOperand(0));
}

static void lowerFunnelShifts(IntrinsicInst *FSHIntrinsic) {
  // Get a separate function - otherwise, we'd have to rework the CFG of the
  // current one. Then simply replace the intrinsic uses with a call to the new
  // function.
  // Generate LLVM IR for i* @spirv.llvm_fsh?_i* (i* %a, i* %b, i* %c)
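  // For example, for a scalar i32 fshl the generated body is roughly:
  //   %rot = urem i32 %c, 32
  //   %hi  = shl i32 %a, %rot
  //   %inv = sub i32 32, %rot
  //   %lo  = lshr i32 %b, %inv
  //   %res = or i32 %hi, %lo
  //   ret i32 %res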
  Module *M = FSHIntrinsic->getModule();
  FunctionType *FSHFuncTy = FSHIntrinsic->getFunctionType();
  Type *FSHRetTy = FSHFuncTy->getReturnType();
  const std::string FuncName = lowerLLVMIntrinsicName(FSHIntrinsic);
  Function *FSHFunc =
      getOrCreateFunction(M, FSHRetTy, FSHFuncTy->params(), FuncName);

  if (!FSHFunc->empty()) {
    FSHIntrinsic->setCalledFunction(FSHFunc);
    return;
  }
  BasicBlock *RotateBB = BasicBlock::Create(M->getContext(), "rotate", FSHFunc);
  IRBuilder<> IRB(RotateBB);
  Type *Ty = FSHFunc->getReturnType();
  // Build the actual funnel shift rotate logic.
  // In the comments, "int" is used interchangeably with "vector of int
  // elements".
  FixedVectorType *VectorTy = dyn_cast<FixedVectorType>(Ty);
  Type *IntTy = VectorTy ? VectorTy->getElementType() : Ty;
  unsigned BitWidth = IntTy->getIntegerBitWidth();
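  // Materialize a constant equal to the bit width and of that same width,
  // i.e. APInt(BitWidth, BitWidth); it is splatted below for vector types.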
  ConstantInt *BitWidthConstant = IRB.getInt({BitWidth, BitWidth});
  Value *BitWidthForInsts =
      VectorTy
          ? IRB.CreateVectorSplat(VectorTy->getNumElements(), BitWidthConstant)
          : BitWidthConstant;
  Value *RotateModVal =
      IRB.CreateURem(/*Rotate*/ FSHFunc->getArg(2), BitWidthForInsts);
  Value *FirstShift = nullptr, *SecShift = nullptr;
  if (FSHIntrinsic->getIntrinsicID() == Intrinsic::fshr) {
    // Shift the less significant number right, the "rotate" number of bits
    // will be 0-filled on the left as a result of this regular shift.
    FirstShift = IRB.CreateLShr(FSHFunc->getArg(1), RotateModVal);
  } else {
    // Shift the more significant number left, the "rotate" number of bits
    // will be 0-filled on the right as a result of this regular shift.
    FirstShift = IRB.CreateShl(FSHFunc->getArg(0), RotateModVal);
  }
  // We want the "rotate" number of the more significant int's LSBs (MSBs) to
  // occupy the leftmost (rightmost) "0 space" left by the previous operation.
  // Therefore, subtract the "rotate" number from the integer bitsize...
  Value *SubRotateVal = IRB.CreateSub(BitWidthForInsts, RotateModVal);
  if (FSHIntrinsic->getIntrinsicID() == Intrinsic::fshr) {
    // ...and left-shift the more significant int by this number, zero-filling
    // the LSBs.
    SecShift = IRB.CreateShl(FSHFunc->getArg(0), SubRotateVal);
  } else {
    // ...and right-shift the less significant int by this number, zero-filling
    // the MSBs.
    SecShift = IRB.CreateLShr(FSHFunc->getArg(1), SubRotateVal);
  }
  // A simple binary addition of the shifted ints yields the final result.
  IRB.CreateRet(IRB.CreateOr(FirstShift, SecShift));

  FSHIntrinsic->setCalledFunction(FSHFunc);
}

static void lowerExpectAssume(IntrinsicInst *II) {
  // If we cannot use the SPV_KHR_expect_assume extension, then we need to
  // ignore the intrinsic and move on. It should be removed later on by LLVM.
  // Otherwise we should lower the intrinsic to the corresponding SPIR-V
  // instruction.
  // For @llvm.assume we have OpAssumeTrueKHR.
  // For @llvm.expect we have OpExpectKHR.
  //
  // We need to lower this into a builtin and then the builtin into a SPIR-V
  // instruction.
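  // For illustration (assuming the usual llvm.spv.* intrinsic naming):
  //   call void @llvm.assume(i1 %c)   ->  call void @llvm.spv.assume(i1 %c)
  //   call i32 @llvm.expect.i32(...)  ->  call i32 @llvm.spv.expect.i32(...)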
  if (II->getIntrinsicID() == Intrinsic::assume) {
    Function *F = Intrinsic::getOrInsertDeclaration(
        II->getModule(), Intrinsic::SPVIntrinsics::spv_assume);
    II->setCalledFunction(F);
  } else if (II->getIntrinsicID() == Intrinsic::expect) {
    Function *F = Intrinsic::getOrInsertDeclaration(
        II->getModule(), Intrinsic::SPVIntrinsics::spv_expect,
        {II->getOperand(0)->getType()});
    II->setCalledFunction(F);
  } else {
    llvm_unreachable("Unknown intrinsic");
  }

  return;
}

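// Redirects an intrinsic call to the SPIR-V intrinsic NewID, overloading the
// new declaration on the types of the operands listed in OpNos. For
// illustration, with OpNos = {1} a call to @llvm.lifetime.start.p0(i64 8,
// ptr %p) ends up calling an spv_lifetime_start declaration overloaded on the
// type of operand 1 (ptr).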
static bool toSpvOverloadedIntrinsic(IntrinsicInst *II, Intrinsic::ID NewID,
                                     ArrayRef<unsigned> OpNos) {
  Function *F = nullptr;
  if (OpNos.empty()) {
    F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID);
  } else {
    SmallVector<Type *, 4> Tys;
    for (unsigned OpNo : OpNos)
      Tys.push_back(II->getOperand(OpNo)->getType());
    F = Intrinsic::getOrInsertDeclaration(II->getModule(), NewID, Tys);
  }
  II->setCalledFunction(F);
  return true;
}

// Substitutes calls to LLVM intrinsics with either calls to SPIR-V intrinsics
// or calls to proper generated functions. Returns True if F was modified.
bool SPIRVPrepareFunctions::substituteIntrinsicCalls(Function *F) {
  bool Changed = false;
  for (BasicBlock &BB : *F) {
    for (Instruction &I : BB) {
      auto Call = dyn_cast<CallInst>(&I);
      if (!Call)
        continue;
      Function *CF = Call->getCalledFunction();
      if (!CF || !CF->isIntrinsic())
        continue;
      auto *II = cast<IntrinsicInst>(Call);
      switch (II->getIntrinsicID()) {
      case Intrinsic::memset:
      case Intrinsic::bswap:
        Changed |= lowerIntrinsicToFunction(II);
        break;
      case Intrinsic::fshl:
      case Intrinsic::fshr:
        lowerFunnelShifts(II);
        Changed = true;
        break;
      case Intrinsic::assume:
      case Intrinsic::expect: {
        const SPIRVSubtarget &STI = TM.getSubtarget<SPIRVSubtarget>(*F);
        if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
          lowerExpectAssume(II);
        Changed = true;
      } break;
      case Intrinsic::lifetime_start:
        Changed |= toSpvOverloadedIntrinsic(
            II, Intrinsic::SPVIntrinsics::spv_lifetime_start, {1});
        break;
      case Intrinsic::lifetime_end:
        Changed |= toSpvOverloadedIntrinsic(
            II, Intrinsic::SPVIntrinsics::spv_lifetime_end, {1});
        break;
      case Intrinsic::ptr_annotation:
        lowerPtrAnnotation(II);
        Changed = true;
        break;
      }
    }
  }
  return Changed;
}

// Returns F unchanged if no aggregate argument/return types are present, or a
// clone of F with such types replaced by i32. The change in types is noted in
// 'spv.cloned_funcs' metadata for later restoration.
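// For illustration, given `define %struct.S @foo(%struct.S %a, i32 %b)`, the
// clone is `define i32 @foo(i32 %a, i32 %b)` and the module roughly gains:
//   !spv.cloned_funcs = !{!0}
//   !0 = !{!"foo", !1, !2}
//   !1 = !{i32 -1, %struct.S zeroinitializer} ; original return type
//   !2 = !{i32 0, %struct.S zeroinitializer}  ; original type of arg #0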
Function *
SPIRVPrepareFunctions::removeAggregateTypesFromSignature(Function *F) {
  bool IsRetAggr = F->getReturnType()->isAggregateType();
  // Allow intrinsics with aggregate return type to reach GlobalISel
  if (F->isIntrinsic() && IsRetAggr)
    return F;

  IRBuilder<> B(F->getContext());

  bool HasAggrArg =
      std::any_of(F->arg_begin(), F->arg_end(), [](Argument &Arg) {
        return Arg.getType()->isAggregateType();
      });
  bool DoClone = IsRetAggr || HasAggrArg;
  if (!DoClone)
    return F;
  SmallVector<std::pair<int, Type *>, 4> ChangedTypes;
  Type *RetType = IsRetAggr ? B.getInt32Ty() : F->getReturnType();
  if (IsRetAggr)
    ChangedTypes.push_back(std::pair<int, Type *>(-1, F->getReturnType()));
  SmallVector<Type *, 4> ArgTypes;
  for (const auto &Arg : F->args()) {
    if (Arg.getType()->isAggregateType()) {
      ArgTypes.push_back(B.getInt32Ty());
      ChangedTypes.push_back(
          std::pair<int, Type *>(Arg.getArgNo(), Arg.getType()));
    } else
      ArgTypes.push_back(Arg.getType());
  }
  FunctionType *NewFTy =
      FunctionType::get(RetType, ArgTypes, F->getFunctionType()->isVarArg());
  Function *NewF =
      Function::Create(NewFTy, F->getLinkage(), F->getName(), *F->getParent());

  ValueToValueMapTy VMap;
  auto NewFArgIt = NewF->arg_begin();
  for (auto &Arg : F->args()) {
    StringRef ArgName = Arg.getName();
    NewFArgIt->setName(ArgName);
    VMap[&Arg] = &(*NewFArgIt++);
  }
  SmallVector<ReturnInst *, 8> Returns;

  CloneFunctionInto(NewF, F, VMap, CloneFunctionChangeType::LocalChangesOnly,
                    Returns);
  NewF->takeName(F);

  NamedMDNode *FuncMD =
      F->getParent()->getOrInsertNamedMetadata("spv.cloned_funcs");
  SmallVector<Metadata *, 2> MDArgs;
  MDArgs.push_back(MDString::get(B.getContext(), NewF->getName()));
  for (auto &ChangedTyP : ChangedTypes)
    MDArgs.push_back(MDNode::get(
        B.getContext(),
        {ConstantAsMetadata::get(B.getInt32(ChangedTyP.first)),
         ValueAsMetadata::get(Constant::getNullValue(ChangedTyP.second))}));
  MDNode *ThisFuncMD = MDNode::get(B.getContext(), MDArgs);
  FuncMD->addOperand(ThisFuncMD);

  for (auto *U : make_early_inc_range(F->users())) {
    if (auto *CI = dyn_cast<CallInst>(U))
      CI->mutateFunctionType(NewF->getFunctionType());
    U->replaceUsesOfWith(F, NewF);
  }

  // Register the mutation.
  if (RetType != F->getReturnType())
    TM.getSubtarget<SPIRVSubtarget>(*F).getSPIRVGlobalRegistry()->addMutated(
        NewF, F->getReturnType());
  return NewF;
}

bool SPIRVPrepareFunctions::runOnModule(Module &M) {
  bool Changed = false;
  for (Function &F : M) {
    Changed |= substituteIntrinsicCalls(&F);
    Changed |= sortBlocks(F);
  }

  std::vector<Function *> FuncsWorklist;
  for (auto &F : M)
    FuncsWorklist.push_back(&F);

  for (auto *F : FuncsWorklist) {
    Function *NewF = removeAggregateTypesFromSignature(F);

    if (NewF != F) {
      F->eraseFromParent();
      Changed = true;
    }
  }
  return Changed;
}

ModulePass *
llvm::createSPIRVPrepareFunctionsPass(const SPIRVTargetMachine &TM) {
  return new SPIRVPrepareFunctions(TM);
}