//===- SPIRVModuleAnalysis.cpp - analysis of global instrs & regs - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The analysis collects instructions that should be output at the module level
// and performs the global register numbering.
//
// The results of this analysis are used in AsmPrinter to rename registers
// globally and to output required instructions at the module level.
//
//===----------------------------------------------------------------------===//

#include "SPIRVModuleAnalysis.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "TargetInfo/SPIRVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"

using namespace llvm;

#define DEBUG_TYPE "spirv-module-analysis"

static cl::opt<bool>
    SPVDumpDeps("spv-dump-deps",
                cl::desc("Dump MIR with SPIR-V dependencies info"),
                cl::Optional, cl::init(false));

char llvm::SPIRVModuleAnalysis::ID = 0;

namespace llvm {
void initializeSPIRVModuleAnalysisPass(PassRegistry &);
} // namespace llvm

INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
                true)

// Retrieve an unsigned from an MDNode with a list of them as operands.
static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
                                unsigned DefaultVal = 0) {
  if (MdNode && OpIndex < MdNode->getNumOperands()) {
    const auto &Op = MdNode->getOperand(OpIndex);
    return mdconst::extract<ConstantInt>(Op)->getZExtValue();
  }
  return DefaultVal;
}

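// Return the requirements (capabilities, extensions and SPIR-V version bounds)
// for using the given symbolic operand with the current subtarget. The result
// is marked unsatisfiable if neither the version range, one of the required
// capabilities, nor the full set of required extensions can be met.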
static SPIRV::Requirements
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
                               unsigned i, const SPIRVSubtarget &ST,
                               SPIRV::RequirementHandler &Reqs) {
  unsigned ReqMinVer = getSymbolicOperandMinVersion(Category, i);
  unsigned ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
  unsigned TargetVer = ST.getSPIRVVersion();
  bool MinVerOK = !ReqMinVer || !TargetVer || TargetVer >= ReqMinVer;
  bool MaxVerOK = !ReqMaxVer || !TargetVer || TargetVer <= ReqMaxVer;
  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
  if (ReqCaps.empty()) {
    if (ReqExts.empty()) {
      if (MinVerOK && MaxVerOK)
        return {true, {}, {}, ReqMinVer, ReqMaxVer};
      return {false, {}, {}, 0, 0};
    }
  } else if (MinVerOK && MaxVerOK) {
    for (auto Cap : ReqCaps) { // Only need 1 of the capabilities to work.
      if (Reqs.isCapabilityAvailable(Cap))
        return {true, {Cap}, {}, ReqMinVer, ReqMaxVer};
    }
  }
  // If there are no capabilities, or we can't satisfy the version or
  // capability requirements, use the list of extensions (if the subtarget
  // can handle them all).
  if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
        return ST.canUseExtension(Ext);
      })) {
    return {true, {}, ReqExts, 0, 0}; // TODO: add versions to extensions.
  }
  return {false, {}, {}, 0, 0};
}

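// Initialize the module-level state: reset the previously collected results
// and derive the memory model, addressing model, source language and their
// required capabilities from module metadata and the subtarget.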
void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
  MAI.MaxID = 0;
  for (int i = 0; i < SPIRV::NUM_MODULE_SECTIONS; i++)
    MAI.MS[i].clear();
  MAI.RegisterAliasTable.clear();
  MAI.InstrsToDelete.clear();
  MAI.FuncMap.clear();
  MAI.GlobalVarList.clear();
  MAI.ExtInstSetMap.clear();
  MAI.Reqs.clear();
  MAI.Reqs.initAvailableCapabilities(*ST);

  // TODO: determine memory model and source language from the configuration.
  if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
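    // The metadata node is expected to carry two integer operands
    // (addressing model, memory model), e.g.:
    //   !spirv.MemoryModel = !{!0}
    //   !0 = !{i32 2, i32 2} ; Physical64, OpenCL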
    auto MemMD = MemModel->getOperand(0);
    MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
        getMetadataUInt(MemMD, 0));
    MAI.Mem =
        static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
  } else {
    // TODO: Add support for VulkanMemoryModel.
    MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
                                : SPIRV::MemoryModel::GLSL450;
    if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
      unsigned PtrSize = ST->getPointerSize();
      MAI.Addr = PtrSize == 32   ? SPIRV::AddressingModel::Physical32
                 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
                                 : SPIRV::AddressingModel::Logical;
    } else {
      // TODO: Add support for PhysicalStorageBufferAddress.
      MAI.Addr = SPIRV::AddressingModel::Logical;
    }
  }
  // Get the OpenCL version number from metadata.
  // TODO: support other source languages.
  if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
    MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
    // Construct version literal in accordance with SPIRV-LLVM-Translator.
    // TODO: support multiple OCL version metadata.
    assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
    auto VersionMD = VerNode->getOperand(0);
    unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
    unsigned MinorNum = getMetadataUInt(VersionMD, 1);
    unsigned RevNum = getMetadataUInt(VersionMD, 2);
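    // E.g. OpenCL C 2.0.0 (major 2, minor 0, rev 0) is encoded as 200000.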
    MAI.SrcLangVersion = (MajorNum * 100 + MinorNum) * 1000 + RevNum;
  } else {
    MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
    MAI.SrcLangVersion = 0;
  }

  if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
    for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
      MDNode *MD = ExtNode->getOperand(I);
      if (!MD || MD->getNumOperands() == 0)
        continue;
      for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
        MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
    }
  }

  // Update required capabilities for this memory model, addressing model and
  // source language.
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
                                 MAI.Mem, *ST);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
                                 MAI.SrcLang, *ST);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                                 MAI.Addr, *ST);

  if (ST->isOpenCLEnv()) {
    // TODO: check if it's required by default.
    MAI.ExtInstSetMap[static_cast<unsigned>(
        SPIRV::InstructionSet::OpenCL_std)] =
        Register::index2VirtReg(MAI.getNextID());
  }
}

// Collect the MI that defines the register in the given machine function.
static void collectDefInstr(Register Reg, const MachineFunction *MF,
                            SPIRV::ModuleAnalysisInfo *MAI,
                            SPIRV::ModuleSectionType MSType,
                            bool DoInsert = true) {
  assert(MAI->hasRegisterAlias(MF, Reg) && "Cannot find register alias");
  MachineInstr *MI = MF->getRegInfo().getUniqueVRegDef(Reg);
  assert(MI && "There should be an instruction that defines the register");
  MAI->setSkipEmission(MI);
  if (DoInsert)
    MAI->MS[MSType].push_back(MI);
}

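// Walk the dependency graph and, for every entry accepted by Pred, assign one
// global virtual register shared by all function-local registers that refer to
// the same entity, collecting the defining instructions into the given module
// section.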
void SPIRVModuleAnalysis::collectGlobalEntities(
    const std::vector<SPIRV::DTSortableEntry *> &DepsGraph,
    SPIRV::ModuleSectionType MSType,
    std::function<bool(const SPIRV::DTSortableEntry *)> Pred,
    bool UsePreOrder = false) {
  DenseSet<const SPIRV::DTSortableEntry *> Visited;
  for (const auto *E : DepsGraph) {
    std::function<void(const SPIRV::DTSortableEntry *)> RecHoistUtil;
    // NOTE: here we prefer a recursive approach over an iterative one because
    // we don't expect dependency chains long enough to cause a stack overflow.
    RecHoistUtil = [MSType, UsePreOrder, &Visited, &Pred,
                    &RecHoistUtil](const SPIRV::DTSortableEntry *E) {
      if (Visited.count(E) || !Pred(E))
        return;
      Visited.insert(E);

      // Traversing the deps graph in post-order allows us to avoid a separate
      // register-alias preprocessing step, but pre-order is required to
      // correctly process function declarations and their arguments.
      if (!UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);

      Register GlobalReg = Register::index2VirtReg(MAI.getNextID());
      bool IsFirst = true;
      for (auto &U : *E) {
        const MachineFunction *MF = U.first;
        Register Reg = U.second;
        MAI.setRegisterAlias(MF, Reg, GlobalReg);
        if (!MF->getRegInfo().getUniqueVRegDef(Reg))
          continue;
        collectDefInstr(Reg, MF, &MAI, MSType, IsFirst);
        IsFirst = false;
        if (E->getIsGV())
          MAI.GlobalVarList.push_back(MF->getRegInfo().getUniqueVRegDef(Reg));
      }

      if (UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);
    };
    RecHoistUtil(E);
  }
}

// The function initializes the global register alias table for types, consts,
// global vars and func decls, and collects these instructions for output at
// the module level. It also collects explicit OpExtension/OpCapability
// instructions.
void SPIRVModuleAnalysis::processDefInstrs(const Module &M) {
  std::vector<SPIRV::DTSortableEntry *> DepsGraph;

  GR->buildDepsGraph(DepsGraph, SPVDumpDeps ? MMI : nullptr);

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_TypeConstVars,
      [](const SPIRV::DTSortableEntry *E) { return !E->getIsFunc(); });

  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    // Iterate through and collect OpExtension/OpCapability instructions.
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        if (MI.getOpcode() == SPIRV::OpExtension) {
          // Here, OpExtension just has a single enum operand, not a string.
          auto Ext = SPIRV::Extension::Extension(MI.getOperand(0).getImm());
          MAI.Reqs.addExtension(Ext);
          MAI.setSkipEmission(&MI);
        } else if (MI.getOpcode() == SPIRV::OpCapability) {
          auto Cap = SPIRV::Capability::Capability(MI.getOperand(0).getImm());
          MAI.Reqs.addCapability(Cap);
          MAI.setSkipEmission(&MI);
        }
      }
    }
  }

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_ExtFuncDecls,
      [](const SPIRV::DTSortableEntry *E) { return E->getIsFunc(); }, true);
}

// True if there is an instruction in the MS list with all the same operands as
// the given instruction (after the given starting operand index).
// TODO: maybe it needs to check Opcodes too.
static bool findSameInstrInMS(const MachineInstr &A,
                              SPIRV::ModuleSectionType MSType,
                              SPIRV::ModuleAnalysisInfo &MAI,
                              unsigned StartOpIndex = 0) {
  for (const auto *B : MAI.MS[MSType]) {
    const unsigned NumAOps = A.getNumOperands();
    if (NumAOps != B->getNumOperands() || A.getNumDefs() != B->getNumDefs())
      continue;
    bool AllOpsMatch = true;
    for (unsigned i = StartOpIndex; i < NumAOps && AllOpsMatch; ++i) {
      if (A.getOperand(i).isReg() && B->getOperand(i).isReg()) {
        Register RegA = A.getOperand(i).getReg();
        Register RegB = B->getOperand(i).getReg();
        AllOpsMatch = MAI.getRegisterAlias(A.getMF(), RegA) ==
                      MAI.getRegisterAlias(B->getMF(), RegB);
      } else {
        AllOpsMatch = A.getOperand(i).isIdenticalTo(B->getOperand(i));
      }
    }
    if (AllOpsMatch)
      return true;
  }
  return false;
}

// Look for IDs declared with Import linkage, and map the corresponding function
// to the register defining that variable (which will usually be the result of
// an OpFunction). This lets us call externally imported functions using
// the correct ID registers.
void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
                                           const Function *F) {
  if (MI.getOpcode() == SPIRV::OpDecorate) {
    // If it has Import linkage.
    auto Dec = MI.getOperand(1).getImm();
    if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
      auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
      if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
        // Map imported function name to function ID register.
        const Function *ImportedFunc =
            F->getParent()->getFunction(getStringImm(MI, 2));
        Register Target = MI.getOperand(0).getReg();
        MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
      }
    }
  } else if (MI.getOpcode() == SPIRV::OpFunction) {
    // Record all internal OpFunction declarations.
    Register Reg = MI.defs().begin()->getReg();
    Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
    assert(GlobalReg.isValid());
    MAI.FuncMap[F] = GlobalReg;
  }
}

// Collect the given instruction in the specified MS. We assume global register
// numbering has already occurred by this point. We can directly compare reg
// arguments when detecting duplicates.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
                              SPIRV::ModuleSectionType MSType,
                              bool Append = true) {
  MAI.setSkipEmission(&MI);
  if (findSameInstrInMS(MI, MSType, MAI))
    return; // Found a duplicate, so don't add it.
  // No duplicates, so add it.
  if (Append)
    MAI.MS[MSType].push_back(&MI);
  else
    MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
}

// Some global instructions make reference to function-local ID regs, so they
// cannot be correctly collected until these registers are globally numbered.
void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
          continue;
        const unsigned OpCode = MI.getOpcode();
        if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
          collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames);
        } else if (OpCode == SPIRV::OpEntryPoint) {
          collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints);
        } else if (TII->isDecorationInstr(MI)) {
          collectOtherInstr(MI, MAI, SPIRV::MB_Annotations);
          collectFuncNames(MI, &*F);
        } else if (TII->isConstantInstr(MI)) {
          // OpSpecConstant*s are not in the DT for now,
          // but they still need to be collected.
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars);
        } else if (OpCode == SPIRV::OpFunction) {
          collectFuncNames(MI, &*F);
        } else if (OpCode == SPIRV::OpTypeForwardPointer) {
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, false);
        }
      }
  }
}

// Number registers in all functions globally from 0 onwards and store
// the result in the global register alias table. Some registers are already
// numbered in collectGlobalEntities.
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        for (MachineOperand &Op : MI.operands()) {
          if (!Op.isReg())
            continue;
          Register Reg = Op.getReg();
          if (MAI.hasRegisterAlias(MF, Reg))
            continue;
          Register NewReg = Register::index2VirtReg(MAI.getNextID());
          MAI.setRegisterAlias(MF, Reg, NewReg);
        }
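        // Reserve a global ID for every extended instruction set referenced
        // by OpExtInst instructions.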
        if (MI.getOpcode() != SPIRV::OpExtInst)
          continue;
        auto Set = MI.getOperand(2).getImm();
        if (MAI.ExtInstSetMap.find(Set) == MAI.ExtInstSetMap.end())
          MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
      }
    }
  }
}

// RequirementHandler implementations.
void SPIRV::RequirementHandler::getAndAddRequirements(
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
    const SPIRVSubtarget &ST) {
  addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
}

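// The given capabilities are implicitly declared by a capability that is
// already required, so record them in AllCaps, drop them from the minimal
// list and recurse into their own implicit declarations.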
void SPIRV::RequirementHandler::pruneCapabilities(
    const CapabilityList &ToPrune) {
  for (const auto &Cap : ToPrune) {
    AllCaps.insert(Cap);
    auto FoundIndex = std::find(MinimalCaps.begin(), MinimalCaps.end(), Cap);
    if (FoundIndex != MinimalCaps.end())
      MinimalCaps.erase(FoundIndex);
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
  }
}

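// Add the given capabilities to the required set, pruning any capabilities
// they implicitly declare so that MinimalCaps stays minimal.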
void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
  for (const auto &Cap : ToAdd) {
    bool IsNewlyInserted = AllCaps.insert(Cap).second;
    if (!IsNewlyInserted) // Don't re-add if it's already been declared.
      continue;
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
    MinimalCaps.push_back(Cap);
  }
}

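// Merge a single Requirements record into the handler: add its capability and
// extensions and tighten the tracked minimum/maximum SPIR-V version bounds,
// reporting a fatal error on conflicting version constraints.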
void SPIRV::RequirementHandler::addRequirements(
    const SPIRV::Requirements &Req) {
  if (!Req.IsSatisfiable)
    report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");

  if (Req.Cap.has_value())
    addCapabilities({Req.Cap.value()});

  addExtensions(Req.Exts);

  if (Req.MinVer) {
    if (MaxVersion && Req.MinVer > MaxVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
                        << " and <= " << MaxVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }

    if (MinVersion == 0 || Req.MinVer > MinVersion)
      MinVersion = Req.MinVer;
  }

  if (Req.MaxVer) {
    if (MinVersion && Req.MaxVer < MinVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
                        << " and >= " << MinVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }

    if (MaxVersion == 0 || Req.MaxVer < MaxVersion)
      MaxVersion = Req.MaxVer;
  }
}

void SPIRV::RequirementHandler::checkSatisfiable(
    const SPIRVSubtarget &ST) const {
  // Report as many errors as possible before aborting the compilation.
  bool IsSatisfiable = true;
  auto TargetVer = ST.getSPIRVVersion();

  if (MaxVersion && TargetVer && MaxVersion < TargetVer) {
    LLVM_DEBUG(
        dbgs() << "Target SPIR-V version too high for required features\n"
               << "Required max version: " << MaxVersion << " target version "
               << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && TargetVer && MinVersion > TargetVer) {
    LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
                      << "Required min version: " << MinVersion
                      << " target version " << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && MaxVersion && MinVersion > MaxVersion) {
    LLVM_DEBUG(
        dbgs()
        << "Version is too low for some features and too high for others.\n"
        << "Required SPIR-V min version: " << MinVersion
        << " required SPIR-V max version " << MaxVersion << "\n");
    IsSatisfiable = false;
  }

  for (auto Cap : MinimalCaps) {
    if (AvailableCaps.contains(Cap))
      continue;
    LLVM_DEBUG(dbgs() << "Capability not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::CapabilityOperand, Cap)
                      << "\n");
    IsSatisfiable = false;
  }

  for (auto Ext : AllExtensions) {
    if (ST.canUseExtension(Ext))
      continue;
    LLVM_DEBUG(dbgs() << "Extension not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::ExtensionOperand, Ext)
                      << "\n");
    IsSatisfiable = false;
  }

  if (!IsSatisfiable)
    report_fatal_error("Unable to meet SPIR-V requirements for this target.");
}

// Add the given capabilities and all their implicitly defined capabilities too.
void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
  for (const auto Cap : ToAdd)
    if (AvailableCaps.insert(Cap).second)
      addAvailableCaps(getSymbolicOperandCapabilities(
          SPIRV::OperandCategory::CapabilityOperand, Cap));
}

namespace llvm {
namespace SPIRV {
void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
  if (ST.isOpenCLEnv()) {
    initAvailableCapabilitiesForOpenCL(ST);
    return;
  }

  if (ST.isVulkanEnv()) {
    initAvailableCapabilitiesForVulkan(ST);
    return;
  }

  report_fatal_error("Unimplemented environment for SPIR-V generation.");
}

void RequirementHandler::initAvailableCapabilitiesForOpenCL(
    const SPIRVSubtarget &ST) {
  // Add the min requirements for different OpenCL and SPIR-V versions.
  addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
                    Capability::Int16, Capability::Int8, Capability::Kernel,
                    Capability::Linkage, Capability::Vector16,
                    Capability::Groups, Capability::GenericPointer,
                    Capability::Shader});
  if (ST.hasOpenCLFullProfile())
    addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
  if (ST.hasOpenCLImageSupport()) {
    addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
                      Capability::Image1D, Capability::SampledBuffer,
                      Capability::ImageBuffer});
    if (ST.isAtLeastOpenCLVer(20))
      addAvailableCaps({Capability::ImageReadWrite});
  }
  if (ST.isAtLeastSPIRVVer(11) && ST.isAtLeastOpenCLVer(22))
    addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
  if (ST.isAtLeastSPIRVVer(13))
    addAvailableCaps({Capability::GroupNonUniform,
                      Capability::GroupNonUniformVote,
                      Capability::GroupNonUniformArithmetic,
                      Capability::GroupNonUniformBallot,
                      Capability::GroupNonUniformClustered,
                      Capability::GroupNonUniformShuffle,
                      Capability::GroupNonUniformShuffleRelative});
  if (ST.isAtLeastSPIRVVer(14))
    addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
                      Capability::SignedZeroInfNanPreserve,
                      Capability::RoundingModeRTE,
                      Capability::RoundingModeRTZ});
  // TODO: verify if this needs some checks.
  addAvailableCaps({Capability::Float16, Capability::Float64});

  // Add capabilities enabled by extensions.
  for (auto Extension : ST.getAllAvailableExtensions()) {
    CapabilityList EnabledCapabilities =
        getCapabilitiesEnabledByExtension(Extension);
    addAvailableCaps(EnabledCapabilities);
  }

  // TODO: add OpenCL extensions.
}

void RequirementHandler::initAvailableCapabilitiesForVulkan(
    const SPIRVSubtarget &ST) {
  addAvailableCaps({Capability::Shader, Capability::Linkage});

  // Provided by Vulkan version 1.0.
  addAvailableCaps({Capability::Int16, Capability::Int64, Capability::Float64});
}

} // namespace SPIRV
} // namespace llvm

// Add the required capabilities from a decoration instruction (including
// BuiltIns).
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
                              SPIRV::RequirementHandler &Reqs,
                              const SPIRVSubtarget &ST) {
  int64_t DecOp = MI.getOperand(DecIndex).getImm();
  auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
  Reqs.addRequirements(getSymbolicOperandRequirements(
      SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));

  if (Dec == SPIRV::Decoration::BuiltIn) {
    int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
    auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
    Reqs.addRequirements(getSymbolicOperandRequirements(
        SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
  }
}

// Add requirements for image handling.
static void addOpTypeImageReqs(const MachineInstr &MI,
                               SPIRV::RequirementHandler &Reqs,
                               const SPIRVSubtarget &ST) {
  assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
  // The operand indices used here are based on the OpTypeImage layout, which
  // the MachineInstr follows as well.
  int64_t ImgFormatOp = MI.getOperand(7).getImm();
  auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
  Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
                             ImgFormat, ST);

  bool IsArrayed = MI.getOperand(4).getImm() == 1;
  bool IsMultisampled = MI.getOperand(5).getImm() == 1;
  bool NoSampler = MI.getOperand(6).getImm() == 2;
  // Add dimension requirements.
  assert(MI.getOperand(2).isImm());
  switch (MI.getOperand(2).getImm()) {
  case SPIRV::Dim::DIM_1D:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
                                   : SPIRV::Capability::Sampled1D);
    break;
  case SPIRV::Dim::DIM_2D:
    if (IsMultisampled && NoSampler)
      Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
    break;
  case SPIRV::Dim::DIM_Cube:
    Reqs.addRequirements(SPIRV::Capability::Shader);
    if (IsArrayed)
      Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
                                     : SPIRV::Capability::SampledCubeArray);
    break;
  case SPIRV::Dim::DIM_Rect:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
                                   : SPIRV::Capability::SampledRect);
    break;
  case SPIRV::Dim::DIM_Buffer:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
                                   : SPIRV::Capability::SampledBuffer);
    break;
  case SPIRV::Dim::DIM_SubpassData:
    Reqs.addRequirements(SPIRV::Capability::InputAttachment);
    break;
  }

  // Has optional access qualifier.
  // TODO: check if it's OpenCL's kernel.
  if (MI.getNumOperands() > 8 &&
      MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
    Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
  else
    Reqs.addRequirements(SPIRV::Capability::ImageBasic);
}

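// Add the capabilities, extensions and version requirements implied by a
// single instruction, based on its opcode and immediate operands.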
void addInstrRequirements(const MachineInstr &MI,
                          SPIRV::RequirementHandler &Reqs,
                          const SPIRVSubtarget &ST) {
  switch (MI.getOpcode()) {
  case SPIRV::OpMemoryModel: {
    int64_t Addr = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                               Addr, ST);
    int64_t Mem = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
                               ST);
    break;
  }
  case SPIRV::OpEntryPoint: {
    int64_t Exe = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpExecutionMode:
  case SPIRV::OpExecutionModeId: {
    int64_t Exe = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpTypeMatrix:
    Reqs.addCapability(SPIRV::Capability::Matrix);
    break;
  case SPIRV::OpTypeInt: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Int64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Int16);
    else if (BitWidth == 8)
      Reqs.addCapability(SPIRV::Capability::Int8);
    break;
  }
  case SPIRV::OpTypeFloat: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Float64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Float16);
    break;
  }
  case SPIRV::OpTypeVector: {
    unsigned NumComponents = MI.getOperand(2).getImm();
    if (NumComponents == 8 || NumComponents == 16)
      Reqs.addCapability(SPIRV::Capability::Vector16);
    break;
  }
  case SPIRV::OpTypePointer: {
    auto SC = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
                               ST);
    // If it's a pointer to a float16 type, add the Float16Buffer capability.
    assert(MI.getOperand(2).isReg());
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
    if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
        TypeDef->getOperand(1).getImm() == 16)
      Reqs.addCapability(SPIRV::Capability::Float16Buffer);
    break;
  }
  case SPIRV::OpBitReverse:
  case SPIRV::OpTypeRuntimeArray:
    Reqs.addCapability(SPIRV::Capability::Shader);
    break;
  case SPIRV::OpTypeOpaque:
  case SPIRV::OpTypeEvent:
    Reqs.addCapability(SPIRV::Capability::Kernel);
    break;
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeReserveId:
    Reqs.addCapability(SPIRV::Capability::Pipes);
    break;
  case SPIRV::OpTypeDeviceEvent:
  case SPIRV::OpTypeQueue:
  case SPIRV::OpBuildNDRange:
    Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
    break;
  case SPIRV::OpDecorate:
  case SPIRV::OpDecorateId:
  case SPIRV::OpDecorateString:
    addOpDecorateReqs(MI, 1, Reqs, ST);
    break;
  case SPIRV::OpMemberDecorate:
  case SPIRV::OpMemberDecorateString:
    addOpDecorateReqs(MI, 2, Reqs, ST);
    break;
  case SPIRV::OpInBoundsPtrAccessChain:
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpConstantSampler:
    Reqs.addCapability(SPIRV::Capability::LiteralSampler);
    break;
  case SPIRV::OpTypeImage:
    addOpTypeImageReqs(MI, Reqs, ST);
    break;
  case SPIRV::OpTypeSampler:
    Reqs.addCapability(SPIRV::Capability::ImageBasic);
    break;
  case SPIRV::OpTypeForwardPointer:
    // TODO: check if it's OpenCL's kernel.
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicLoad:
  case SPIRV::OpAtomicStore:
  case SPIRV::OpAtomicExchange:
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicIIncrement:
  case SPIRV::OpAtomicIDecrement:
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicUMin:
  case SPIRV::OpAtomicUMax:
  case SPIRV::OpAtomicSMin:
  case SPIRV::OpAtomicSMax:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor: {
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    const MachineInstr *InstrPtr = &MI;
    if (MI.getOpcode() == SPIRV::OpAtomicStore) {
      assert(MI.getOperand(3).isReg());
      InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
      assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
    }
    assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
    Register TypeReg = InstrPtr->getOperand(1).getReg();
    SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
    if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
      unsigned BitWidth = TypeDef->getOperand(1).getImm();
      if (BitWidth == 64)
        Reqs.addCapability(SPIRV::Capability::Int64Atomics);
    }
    break;
  }
  case SPIRV::OpGroupNonUniformIAdd:
  case SPIRV::OpGroupNonUniformFAdd:
  case SPIRV::OpGroupNonUniformIMul:
  case SPIRV::OpGroupNonUniformFMul:
  case SPIRV::OpGroupNonUniformSMin:
  case SPIRV::OpGroupNonUniformUMin:
  case SPIRV::OpGroupNonUniformFMin:
  case SPIRV::OpGroupNonUniformSMax:
  case SPIRV::OpGroupNonUniformUMax:
  case SPIRV::OpGroupNonUniformFMax:
  case SPIRV::OpGroupNonUniformBitwiseAnd:
  case SPIRV::OpGroupNonUniformBitwiseOr:
  case SPIRV::OpGroupNonUniformBitwiseXor:
  case SPIRV::OpGroupNonUniformLogicalAnd:
  case SPIRV::OpGroupNonUniformLogicalOr:
  case SPIRV::OpGroupNonUniformLogicalXor: {
    assert(MI.getOperand(3).isImm());
    int64_t GroupOp = MI.getOperand(3).getImm();
    switch (GroupOp) {
    case SPIRV::GroupOperation::Reduce:
    case SPIRV::GroupOperation::InclusiveScan:
    case SPIRV::GroupOperation::ExclusiveScan:
      Reqs.addCapability(SPIRV::Capability::Kernel);
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
      break;
    case SPIRV::GroupOperation::ClusteredReduce:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
      break;
    case SPIRV::GroupOperation::PartitionedReduceNV:
    case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
    case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
      break;
    }
    break;
  }
  case SPIRV::OpGroupNonUniformShuffle:
  case SPIRV::OpGroupNonUniformShuffleXor:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
    break;
  case SPIRV::OpGroupNonUniformShuffleUp:
  case SPIRV::OpGroupNonUniformShuffleDown:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
    break;
  case SPIRV::OpGroupAll:
  case SPIRV::OpGroupAny:
  case SPIRV::OpGroupBroadcast:
  case SPIRV::OpGroupIAdd:
  case SPIRV::OpGroupFAdd:
  case SPIRV::OpGroupFMin:
  case SPIRV::OpGroupUMin:
  case SPIRV::OpGroupSMin:
  case SPIRV::OpGroupFMax:
  case SPIRV::OpGroupUMax:
  case SPIRV::OpGroupSMax:
    Reqs.addCapability(SPIRV::Capability::Groups);
    break;
  case SPIRV::OpGroupNonUniformElect:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
    break;
  case SPIRV::OpGroupNonUniformAll:
  case SPIRV::OpGroupNonUniformAny:
  case SPIRV::OpGroupNonUniformAllEqual:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
    break;
  case SPIRV::OpGroupNonUniformBroadcast:
  case SPIRV::OpGroupNonUniformBroadcastFirst:
  case SPIRV::OpGroupNonUniformBallot:
  case SPIRV::OpGroupNonUniformInverseBallot:
  case SPIRV::OpGroupNonUniformBallotBitExtract:
  case SPIRV::OpGroupNonUniformBallotBitCount:
  case SPIRV::OpGroupNonUniformBallotFindLSB:
  case SPIRV::OpGroupNonUniformBallotFindMSB:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
    break;
  default:
    break;
  }
}

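// Collect module-wide requirements: per-instruction requirements for every
// machine function, plus execution modes coming from the "spirv.ExecutionMode"
// named metadata and from per-function metadata and attributes.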
static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
                        MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
  // Collect requirements for existing instructions.
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (const MachineBasicBlock &MBB : *MF)
      for (const MachineInstr &MI : MBB)
        addInstrRequirements(MI, MAI.Reqs, ST);
  }
  // Collect requirements for OpExecutionMode instructions.
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  if (Node) {
    for (unsigned i = 0; i < Node->getNumOperands(); i++) {
      MDNode *MDN = cast<MDNode>(Node->getOperand(i));
      const MDOperand &MDOp = MDN->getOperand(1);
      if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
        Constant *C = CMeta->getValue();
        if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
          auto EM = Const->getZExtValue();
          MAI.Reqs.getAndAddRequirements(
              SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
        }
      }
    }
  }
  for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
    const Function &F = *FI;
    if (F.isDeclaration())
      continue;
    if (F.getMetadata("reqd_work_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getFnAttribute("hlsl.numthreads").isValid()) {
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    }
    if (F.getMetadata("work_group_size_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSizeHint, ST);
    if (F.getMetadata("intel_reqd_sub_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::SubgroupSize, ST);
    if (F.getMetadata("vec_type_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::VecTypeHint, ST);

    if (F.hasOptNone() &&
        ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
      // Output OpCapability OptNoneINTEL.
      MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
      MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
    }
  }
}

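// Translate the instruction's MI-level fast-math flags into a SPIR-V
// FPFastMathMode bitmask.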
static unsigned getFastMathFlags(const MachineInstr &I) {
  unsigned Flags = SPIRV::FPFastMathMode::None;
  if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
    Flags |= SPIRV::FPFastMathMode::NotNaN;
  if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
    Flags |= SPIRV::FPFastMathMode::NotInf;
  if (I.getFlag(MachineInstr::MIFlag::FmNsz))
    Flags |= SPIRV::FPFastMathMode::NSZ;
  if (I.getFlag(MachineInstr::MIFlag::FmArcp))
    Flags |= SPIRV::FPFastMathMode::AllowRecip;
  if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
    Flags |= SPIRV::FPFastMathMode::Fast;
  return Flags;
}

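// Add OpDecorate instructions for the no-wrap and fast-math MI flags of the
// given instruction, when the corresponding decorations are supported.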
static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
                                   const SPIRVInstrInfo &TII,
                                   SPIRV::RequirementHandler &Reqs) {
  if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoSignedWrap, ST, Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoSignedWrap, {});
  }
  if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoUnsignedWrap, ST,
                                     Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoUnsignedWrap, {});
  }
  if (!TII.canUseFastMathFlags(I))
    return;
  unsigned FMFlags = getFastMathFlags(I);
  if (FMFlags == SPIRV::FPFastMathMode::None)
    return;
  Register DstReg = I.getOperand(0).getReg();
  buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
}

// Walk all functions and add decorations related to MI flags.
static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
                           MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
                           SPIRV::ModuleAnalysisInfo &MAI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (auto &MBB : *MF)
      for (auto &MI : MBB)
        handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
  }
}

struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;

void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<MachineModuleInfoWrapperPass>();
}

bool SPIRVModuleAnalysis::runOnModule(Module &M) {
  SPIRVTargetMachine &TM =
      getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
  ST = TM.getSubtargetImpl();
  GR = ST->getSPIRVGlobalRegistry();
  TII = ST->getInstrInfo();

  MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();

  setBaseInfo(M);

  addDecorations(M, *TII, MMI, *ST, MAI);

  collectReqs(M, MAI, MMI, *ST);

  // Process type/const/global var/func decl instructions, number their
  // destination registers from 0 to N, collect Extensions and Capabilities.
  processDefInstrs(M);

  // Number the rest of the registers from N+1 onwards.
  numberRegistersGlobally(M);

  // Collect OpName, OpEntryPoint, OpDecorate etc. and process other
  // instructions.
  processOtherInstrs(M);

  // If there are no entry points, we need the Linkage capability.
  if (MAI.MS[SPIRV::MB_EntryPoints].empty())
    MAI.Reqs.addCapability(SPIRV::Capability::Linkage);

  return false;
}
1048