//===- SPIRVModuleAnalysis.cpp - analysis of global instrs & regs - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The analysis collects instructions that should be output at the module level
// and performs the global register numbering.
//
// The results of this analysis are used in AsmPrinter to rename registers
// globally and to output required instructions at the module level.
//
//===----------------------------------------------------------------------===//

#include "SPIRVModuleAnalysis.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVTargetMachine.h"
#include "SPIRVUtils.h"
#include "TargetInfo/SPIRVTargetInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"

using namespace llvm;

#define DEBUG_TYPE "spirv-module-analysis"

static cl::opt<bool>
    SPVDumpDeps("spv-dump-deps",
                cl::desc("Dump MIR with SPIR-V dependencies info"),
                cl::Optional, cl::init(false));

char llvm::SPIRVModuleAnalysis::ID = 0;

namespace llvm {
void initializeSPIRVModuleAnalysisPass(PassRegistry &);
} // namespace llvm

INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
                true)

// Retrieve an unsigned from an MDNode with a list of them as operands.
static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
                                unsigned DefaultVal = 0) {
  if (MdNode && OpIndex < MdNode->getNumOperands()) {
    const auto &Op = MdNode->getOperand(OpIndex);
    return mdconst::extract<ConstantInt>(Op)->getZExtValue();
  }
  return DefaultVal;
}

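// Compute the requirements (capability / extension / version constraints) for
// a single symbolic operand. A version of 0 on either side means
// "unspecified" and imposes no constraint; if the version and capability
// requirements cannot be met, the extension list is used as a fallback.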
static SPIRV::Requirements
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
                               unsigned i, const SPIRVSubtarget &ST,
                               SPIRV::RequirementHandler &Reqs) {
  unsigned ReqMinVer = getSymbolicOperandMinVersion(Category, i);
  unsigned ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
  unsigned TargetVer = ST.getSPIRVVersion();
  bool MinVerOK = !ReqMinVer || !TargetVer || TargetVer >= ReqMinVer;
  bool MaxVerOK = !ReqMaxVer || !TargetVer || TargetVer <= ReqMaxVer;
  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
  if (ReqCaps.empty()) {
    if (ReqExts.empty()) {
      if (MinVerOK && MaxVerOK)
        return {true, {}, {}, ReqMinVer, ReqMaxVer};
      return {false, {}, {}, 0, 0};
    }
  } else if (MinVerOK && MaxVerOK) {
    for (auto Cap : ReqCaps) { // Only need 1 of the capabilities to work.
      if (Reqs.isCapabilityAvailable(Cap))
        return {true, {Cap}, {}, ReqMinVer, ReqMaxVer};
    }
  }
  // If there are no capabilities, or we can't satisfy the version or
  // capability requirements, use the list of extensions (if the subtarget
  // can handle them all).
  if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
        return ST.canUseExtension(Ext);
      })) {
    return {true, {}, ReqExts, 0, 0}; // TODO: add versions to extensions.
  }
  return {false, {}, {}, 0, 0};
}

void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
  MAI.MaxID = 0;
  for (int i = 0; i < SPIRV::NUM_MODULE_SECTIONS; i++)
    MAI.MS[i].clear();
  MAI.RegisterAliasTable.clear();
  MAI.InstrsToDelete.clear();
  MAI.FuncNameMap.clear();
  MAI.GlobalVarList.clear();
  MAI.ExtInstSetMap.clear();
  MAI.Reqs.clear();
  MAI.Reqs.initAvailableCapabilities(*ST);

  // TODO: determine memory model and source language from the configuration.
  if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
    auto MemMD = MemModel->getOperand(0);
    MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
        getMetadataUInt(MemMD, 0));
    MAI.Mem =
        static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
  } else {
    MAI.Mem = SPIRV::MemoryModel::OpenCL;
    unsigned PtrSize = ST->getPointerSize();
    MAI.Addr = PtrSize == 32   ? SPIRV::AddressingModel::Physical32
               : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
                               : SPIRV::AddressingModel::Logical;
  }
  // Get the OpenCL version number from metadata.
  // TODO: support other source languages.
  if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
    MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
    // Construct version literal in accordance with SPIRV-LLVM-Translator.
    // TODO: support multiple OCL version metadata.
    assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
    auto VersionMD = VerNode->getOperand(0);
    unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
    unsigned MinorNum = getMetadataUInt(VersionMD, 1);
    unsigned RevNum = getMetadataUInt(VersionMD, 2);
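    // For example, OpenCL C 2.0.0 is encoded as (2 * 100 + 0) * 1000 + 0,
    // i.e. 200000.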
    MAI.SrcLangVersion = (MajorNum * 100 + MinorNum) * 1000 + RevNum;
  } else {
    MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
    MAI.SrcLangVersion = 0;
  }

  if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
    for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
      MDNode *MD = ExtNode->getOperand(I);
      if (!MD || MD->getNumOperands() == 0)
        continue;
      for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
        MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
    }
  }

  // Update required capabilities for this memory model, addressing model and
  // source language.
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
                                 MAI.Mem, *ST);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
                                 MAI.SrcLang, *ST);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                                 MAI.Addr, *ST);

  // TODO: check if it's required by default.
  MAI.ExtInstSetMap[static_cast<unsigned>(SPIRV::InstructionSet::OpenCL_std)] =
      Register::index2VirtReg(MAI.getNextID());
}

// Collect the MI that defines the register in the given machine function.
static void collectDefInstr(Register Reg, const MachineFunction *MF,
                            SPIRV::ModuleAnalysisInfo *MAI,
                            SPIRV::ModuleSectionType MSType,
                            bool DoInsert = true) {
  assert(MAI->hasRegisterAlias(MF, Reg) && "Cannot find register alias");
  MachineInstr *MI = MF->getRegInfo().getUniqueVRegDef(Reg);
  assert(MI && "There should be an instruction that defines the register");
  MAI->setSkipEmission(MI);
  if (DoInsert)
    MAI->MS[MSType].push_back(MI);
}

void SPIRVModuleAnalysis::collectGlobalEntities(
    const std::vector<SPIRV::DTSortableEntry *> &DepsGraph,
    SPIRV::ModuleSectionType MSType,
    std::function<bool(const SPIRV::DTSortableEntry *)> Pred,
    bool UsePreOrder = false) {
  DenseSet<const SPIRV::DTSortableEntry *> Visited;
  for (const auto *E : DepsGraph) {
    std::function<void(const SPIRV::DTSortableEntry *)> RecHoistUtil;
    // NOTE: here we prefer the recursive approach over the iterative one
    // because we don't expect dependency chains long enough to cause a stack
    // overflow.
    RecHoistUtil = [MSType, UsePreOrder, &Visited, &Pred,
                    &RecHoistUtil](const SPIRV::DTSortableEntry *E) {
      if (Visited.count(E) || !Pred(E))
        return;
      Visited.insert(E);

      // Traversing the deps graph in post-order lets us avoid a separate
      // register-alias preprocessing step, but pre-order is required to
      // correctly process function declarations and their arguments.
      if (!UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);

      Register GlobalReg = Register::index2VirtReg(MAI.getNextID());
      bool IsFirst = true;
      for (auto &U : *E) {
        const MachineFunction *MF = U.first;
        Register Reg = U.second;
        MAI.setRegisterAlias(MF, Reg, GlobalReg);
        if (!MF->getRegInfo().getUniqueVRegDef(Reg))
          continue;
        collectDefInstr(Reg, MF, &MAI, MSType, IsFirst);
        IsFirst = false;
        if (E->getIsGV())
          MAI.GlobalVarList.push_back(MF->getRegInfo().getUniqueVRegDef(Reg));
      }

      if (UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);
    };
    RecHoistUtil(E);
  }
}

// The function initializes the global register alias table for types, consts,
// global vars and func decls, and collects these instructions for output at
// the module level. It also collects explicit OpExtension/OpCapability
// instructions.
void SPIRVModuleAnalysis::processDefInstrs(const Module &M) {
  std::vector<SPIRV::DTSortableEntry *> DepsGraph;

  GR->buildDepsGraph(DepsGraph, SPVDumpDeps ? MMI : nullptr);

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_TypeConstVars,
      [](const SPIRV::DTSortableEntry *E) { return !E->getIsFunc(); });

  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    // Iterate through and collect OpExtension/OpCapability instructions.
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        if (MI.getOpcode() == SPIRV::OpExtension) {
          // Here, OpExtension just has a single enum operand, not a string.
          auto Ext = SPIRV::Extension::Extension(MI.getOperand(0).getImm());
          MAI.Reqs.addExtension(Ext);
          MAI.setSkipEmission(&MI);
        } else if (MI.getOpcode() == SPIRV::OpCapability) {
          auto Cap = SPIRV::Capability::Capability(MI.getOperand(0).getImm());
          MAI.Reqs.addCapability(Cap);
          MAI.setSkipEmission(&MI);
        }
      }
    }
  }

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_ExtFuncDecls,
      [](const SPIRV::DTSortableEntry *E) { return E->getIsFunc(); }, true);
}

// Returns true if there is an instruction in the MS list with all the same
// operands as the given instruction (after the given starting index).
// TODO: maybe it needs to check Opcodes too.
static bool findSameInstrInMS(const MachineInstr &A,
                              SPIRV::ModuleSectionType MSType,
                              SPIRV::ModuleAnalysisInfo &MAI,
                              unsigned StartOpIndex = 0) {
  for (const auto *B : MAI.MS[MSType]) {
    const unsigned NumAOps = A.getNumOperands();
    if (NumAOps != B->getNumOperands() || A.getNumDefs() != B->getNumDefs())
      continue;
    bool AllOpsMatch = true;
    for (unsigned i = StartOpIndex; i < NumAOps && AllOpsMatch; ++i) {
      if (A.getOperand(i).isReg() && B->getOperand(i).isReg()) {
        Register RegA = A.getOperand(i).getReg();
        Register RegB = B->getOperand(i).getReg();
        AllOpsMatch = MAI.getRegisterAlias(A.getMF(), RegA) ==
                      MAI.getRegisterAlias(B->getMF(), RegB);
      } else {
        AllOpsMatch = A.getOperand(i).isIdenticalTo(B->getOperand(i));
      }
    }
    if (AllOpsMatch)
      return true;
  }
  return false;
}

// Look for IDs declared with Import linkage, and map the imported name string
// to the register defining that variable (which will usually be the result of
// an OpFunction). This lets us call externally imported functions using
// the correct ID registers.
void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
                                           const Function &F) {
  if (MI.getOpcode() == SPIRV::OpDecorate) {
    // Check whether it has Import linkage.
    auto Dec = MI.getOperand(1).getImm();
    if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
      auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
      if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
        // Map imported function name to function ID register.
        std::string Name = getStringImm(MI, 2);
        Register Target = MI.getOperand(0).getReg();
        // TODO: check defs from different MFs.
        MAI.FuncNameMap[Name] = MAI.getRegisterAlias(MI.getMF(), Target);
      }
    }
  } else if (MI.getOpcode() == SPIRV::OpFunction) {
    // Record all internal OpFunction declarations.
    Register Reg = MI.defs().begin()->getReg();
    Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
    assert(GlobalReg.isValid());
    // TODO: check that it does not conflict with existing entries.
    MAI.FuncNameMap[getFunctionGlobalIdentifier(&F)] = GlobalReg;
  }
}

// Collect the given instruction in the specified MS. We assume global register
// numbering has already occurred by this point, so we can directly compare
// register arguments when detecting duplicates.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
                              SPIRV::ModuleSectionType MSType,
                              bool Append = true) {
  MAI.setSkipEmission(&MI);
  if (findSameInstrInMS(MI, MSType, MAI))
    return; // Found a duplicate, so don't add it.
  // No duplicates, so add it.
  if (Append)
    MAI.MS[MSType].push_back(&MI);
  else
    MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
}

// Some global instructions reference function-local ID registers, so they
// cannot be correctly collected until those registers are globally numbered.
void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
          continue;
        const unsigned OpCode = MI.getOpcode();
        if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
          collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames);
        } else if (OpCode == SPIRV::OpEntryPoint) {
          collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints);
        } else if (TII->isDecorationInstr(MI)) {
          collectOtherInstr(MI, MAI, SPIRV::MB_Annotations);
          collectFuncNames(MI, *F);
        } else if (TII->isConstantInstr(MI)) {
          // OpSpecConstant* instructions are not in the DT at the moment,
          // but they still need to be collected.
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars);
        } else if (OpCode == SPIRV::OpFunction) {
          collectFuncNames(MI, *F);
        } else if (OpCode == SPIRV::OpTypeForwardPointer) {
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, false);
        }
      }
  }
}

// Number registers in all functions globally from 0 onwards and store
// the result in the global register alias table. Some registers are already
// numbered in collectGlobalEntities.
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        for (MachineOperand &Op : MI.operands()) {
          if (!Op.isReg())
            continue;
          Register Reg = Op.getReg();
          if (MAI.hasRegisterAlias(MF, Reg))
            continue;
          Register NewReg = Register::index2VirtReg(MAI.getNextID());
          MAI.setRegisterAlias(MF, Reg, NewReg);
        }
        if (MI.getOpcode() != SPIRV::OpExtInst)
          continue;
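        // OpExtInst refers to an extended instruction set via the immediate
        // in operand 2; make sure that set has an ID register assigned.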
        auto Set = MI.getOperand(2).getImm();
        if (MAI.ExtInstSetMap.find(Set) == MAI.ExtInstSetMap.end())
          MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
      }
    }
  }
}

// Find OpIEqual and OpBranchConditional instructions originating from
// OpSwitches and mark them as skipped for emission. Also mark an MBB as
// skipped if it contains only such instructions.
static void processSwitches(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
                            MachineModuleInfo *MMI) {
  DenseSet<Register> SwitchRegs;
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
          continue;
        if (MI.getOpcode() == SPIRV::OpSwitch) {
          assert(MI.getOperand(0).isReg());
          SwitchRegs.insert(MI.getOperand(0).getReg());
        }
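        // An OpISubS fed by a switch register produces another value used by
        // the lowered comparisons, so track its result and skip emitting it.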
        if (MI.getOpcode() == SPIRV::OpISubS &&
            SwitchRegs.contains(MI.getOperand(2).getReg())) {
          SwitchRegs.insert(MI.getOperand(0).getReg());
          MAI.setSkipEmission(&MI);
        }
        if ((MI.getOpcode() != SPIRV::OpIEqual &&
             MI.getOpcode() != SPIRV::OpULessThanEqual) ||
            !MI.getOperand(2).isReg() ||
            !SwitchRegs.contains(MI.getOperand(2).getReg()))
          continue;
        Register CmpReg = MI.getOperand(0).getReg();
        MachineInstr *CBr = MI.getNextNode();
        assert(CBr && CBr->getOpcode() == SPIRV::OpBranchConditional &&
               CBr->getOperand(0).isReg() &&
               CBr->getOperand(0).getReg() == CmpReg);
        MAI.setSkipEmission(&MI);
        MAI.setSkipEmission(CBr);
        if (&MBB.front() == &MI && &MBB.back() == CBr)
          MAI.MBBsToSkip.insert(&MBB);
      }
  }
}

// RequirementHandler implementations.
void SPIRV::RequirementHandler::getAndAddRequirements(
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
    const SPIRVSubtarget &ST) {
  addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
}

void SPIRV::RequirementHandler::pruneCapabilities(
    const CapabilityList &ToPrune) {
  for (const auto &Cap : ToPrune) {
    AllCaps.insert(Cap);
    auto FoundIndex = std::find(MinimalCaps.begin(), MinimalCaps.end(), Cap);
    if (FoundIndex != MinimalCaps.end())
      MinimalCaps.erase(FoundIndex);
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
  }
}

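// Add capabilities to the minimal set. A capability implicitly declares the
// capabilities it depends on, so those are pruned from the minimal list:
// only the top-level capabilities need to be emitted explicitly.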
void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
  for (const auto &Cap : ToAdd) {
    bool IsNewlyInserted = AllCaps.insert(Cap).second;
    if (!IsNewlyInserted) // Don't re-add if it's already been declared.
      continue;
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
    MinimalCaps.push_back(Cap);
  }
}

void SPIRV::RequirementHandler::addRequirements(
    const SPIRV::Requirements &Req) {
  if (!Req.IsSatisfiable)
    report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");

  if (Req.Cap.has_value())
    addCapabilities({Req.Cap.value()});

  addExtensions(Req.Exts);

  if (Req.MinVer) {
    if (MaxVersion && Req.MinVer > MaxVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
                        << " and <= " << MaxVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }

    if (MinVersion == 0 || Req.MinVer > MinVersion)
      MinVersion = Req.MinVer;
  }

  if (Req.MaxVer) {
    if (MinVersion && Req.MaxVer < MinVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
                        << " and >= " << MinVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }

    if (MaxVersion == 0 || Req.MaxVer < MaxVersion)
      MaxVersion = Req.MaxVer;
  }
}

void SPIRV::RequirementHandler::checkSatisfiable(
    const SPIRVSubtarget &ST) const {
  // Report as many errors as possible before aborting the compilation.
  bool IsSatisfiable = true;
  auto TargetVer = ST.getSPIRVVersion();

  if (MaxVersion && TargetVer && MaxVersion < TargetVer) {
    LLVM_DEBUG(
        dbgs() << "Target SPIR-V version too high for required features\n"
               << "Required max version: " << MaxVersion << " target version "
               << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && TargetVer && MinVersion > TargetVer) {
    LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
                      << "Required min version: " << MinVersion
                      << " target version " << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && MaxVersion && MinVersion > MaxVersion) {
    LLVM_DEBUG(
        dbgs()
        << "Version is too low for some features and too high for others.\n"
        << "Required SPIR-V min version: " << MinVersion
        << " required SPIR-V max version " << MaxVersion << "\n");
    IsSatisfiable = false;
  }

  for (auto Cap : MinimalCaps) {
    if (AvailableCaps.contains(Cap))
      continue;
    LLVM_DEBUG(dbgs() << "Capability not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::CapabilityOperand, Cap)
                      << "\n");
    IsSatisfiable = false;
  }

  for (auto Ext : AllExtensions) {
    if (ST.canUseExtension(Ext))
      continue;
    LLVM_DEBUG(dbgs() << "Extension not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::ExtensionOperand, Ext)
                      << "\n");
    IsSatisfiable = false;
  }

  if (!IsSatisfiable)
    report_fatal_error("Unable to meet SPIR-V requirements for this target.");
}

// Add the given capabilities and all their implicitly defined capabilities too.
void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
  for (const auto Cap : ToAdd)
    if (AvailableCaps.insert(Cap).second)
      addAvailableCaps(getSymbolicOperandCapabilities(
          SPIRV::OperandCategory::CapabilityOperand, Cap));
}

namespace llvm {
namespace SPIRV {
void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
  // TODO: Implement this for targets other than OpenCL.
  if (!ST.isOpenCLEnv())
    return;
  // Add the min requirements for different OpenCL and SPIR-V versions.
  addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
                    Capability::Int16, Capability::Int8, Capability::Kernel,
                    Capability::Linkage, Capability::Vector16,
                    Capability::Groups, Capability::GenericPointer,
                    Capability::Shader});
  if (ST.hasOpenCLFullProfile())
    addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
  if (ST.hasOpenCLImageSupport()) {
    addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
                      Capability::Image1D, Capability::SampledBuffer,
                      Capability::ImageBuffer});
    if (ST.isAtLeastOpenCLVer(20))
      addAvailableCaps({Capability::ImageReadWrite});
  }
  if (ST.isAtLeastSPIRVVer(11) && ST.isAtLeastOpenCLVer(22))
    addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
  if (ST.isAtLeastSPIRVVer(13))
    addAvailableCaps({Capability::GroupNonUniform,
                      Capability::GroupNonUniformVote,
                      Capability::GroupNonUniformArithmetic,
                      Capability::GroupNonUniformBallot,
                      Capability::GroupNonUniformClustered,
                      Capability::GroupNonUniformShuffle,
                      Capability::GroupNonUniformShuffleRelative});
  if (ST.isAtLeastSPIRVVer(14))
    addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
                      Capability::SignedZeroInfNanPreserve,
                      Capability::RoundingModeRTE,
                      Capability::RoundingModeRTZ});
  // TODO: verify if this needs some checks.
  addAvailableCaps({Capability::Float16, Capability::Float64});

  // TODO: add OpenCL extensions.
}
} // namespace SPIRV
} // namespace llvm

// Add the required capabilities from a decoration instruction (including
// BuiltIns).
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
                              SPIRV::RequirementHandler &Reqs,
                              const SPIRVSubtarget &ST) {
  int64_t DecOp = MI.getOperand(DecIndex).getImm();
  auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
  Reqs.addRequirements(getSymbolicOperandRequirements(
      SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));

  if (Dec == SPIRV::Decoration::BuiltIn) {
    int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
    auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
    Reqs.addRequirements(getSymbolicOperandRequirements(
        SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
  }
}

// Add requirements for image handling.
static void addOpTypeImageReqs(const MachineInstr &MI,
                               SPIRV::RequirementHandler &Reqs,
                               const SPIRVSubtarget &ST) {
  assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
  // The operand indices used here are based on the OpTypeImage layout, which
  // the MachineInstr follows as well.
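  // That layout is: result, sampled type, Dim, Depth, Arrayed, MS, Sampled,
  // Image Format, and an optional Access Qualifier.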
  int64_t ImgFormatOp = MI.getOperand(7).getImm();
  auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
  Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
                             ImgFormat, ST);

  bool IsArrayed = MI.getOperand(4).getImm() == 1;
  bool IsMultisampled = MI.getOperand(5).getImm() == 1;
  bool NoSampler = MI.getOperand(6).getImm() == 2;
  // Add dimension requirements.
  assert(MI.getOperand(2).isImm());
  switch (MI.getOperand(2).getImm()) {
  case SPIRV::Dim::DIM_1D:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
                                   : SPIRV::Capability::Sampled1D);
    break;
  case SPIRV::Dim::DIM_2D:
    if (IsMultisampled && NoSampler)
      Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
    break;
  case SPIRV::Dim::DIM_Cube:
    Reqs.addRequirements(SPIRV::Capability::Shader);
    if (IsArrayed)
      Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
                                     : SPIRV::Capability::SampledCubeArray);
    break;
  case SPIRV::Dim::DIM_Rect:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
                                   : SPIRV::Capability::SampledRect);
    break;
  case SPIRV::Dim::DIM_Buffer:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
                                   : SPIRV::Capability::SampledBuffer);
    break;
  case SPIRV::Dim::DIM_SubpassData:
    Reqs.addRequirements(SPIRV::Capability::InputAttachment);
    break;
  }

  // Has an optional access qualifier.
  // TODO: check if it's OpenCL's kernel.
  if (MI.getNumOperands() > 8 &&
      MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
    Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
  else
    Reqs.addRequirements(SPIRV::Capability::ImageBasic);
}

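// Add the requirements implied by a single instruction's opcode and operands.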
void addInstrRequirements(const MachineInstr &MI,
                          SPIRV::RequirementHandler &Reqs,
                          const SPIRVSubtarget &ST) {
  switch (MI.getOpcode()) {
  case SPIRV::OpMemoryModel: {
    int64_t Addr = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                               Addr, ST);
    int64_t Mem = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
                               ST);
    break;
  }
  case SPIRV::OpEntryPoint: {
    int64_t Exe = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpExecutionMode:
  case SPIRV::OpExecutionModeId: {
    int64_t Exe = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpTypeMatrix:
    Reqs.addCapability(SPIRV::Capability::Matrix);
    break;
  case SPIRV::OpTypeInt: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Int64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Int16);
    else if (BitWidth == 8)
      Reqs.addCapability(SPIRV::Capability::Int8);
    break;
  }
  case SPIRV::OpTypeFloat: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Float64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Float16);
    break;
  }
  case SPIRV::OpTypeVector: {
    unsigned NumComponents = MI.getOperand(2).getImm();
    if (NumComponents == 8 || NumComponents == 16)
      Reqs.addCapability(SPIRV::Capability::Vector16);
    break;
  }
  case SPIRV::OpTypePointer: {
    auto SC = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
                               ST);
    // If it's a pointer to a float16 type, add the Float16Buffer capability.
    assert(MI.getOperand(2).isReg());
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
    if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
        TypeDef->getOperand(1).getImm() == 16)
      Reqs.addCapability(SPIRV::Capability::Float16Buffer);
    break;
  }
  case SPIRV::OpBitReverse:
  case SPIRV::OpTypeRuntimeArray:
    Reqs.addCapability(SPIRV::Capability::Shader);
    break;
  case SPIRV::OpTypeOpaque:
  case SPIRV::OpTypeEvent:
    Reqs.addCapability(SPIRV::Capability::Kernel);
    break;
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeReserveId:
    Reqs.addCapability(SPIRV::Capability::Pipes);
    break;
  case SPIRV::OpTypeDeviceEvent:
  case SPIRV::OpTypeQueue:
  case SPIRV::OpBuildNDRange:
    Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
    break;
  case SPIRV::OpDecorate:
  case SPIRV::OpDecorateId:
  case SPIRV::OpDecorateString:
    addOpDecorateReqs(MI, 1, Reqs, ST);
    break;
  case SPIRV::OpMemberDecorate:
  case SPIRV::OpMemberDecorateString:
    addOpDecorateReqs(MI, 2, Reqs, ST);
    break;
  case SPIRV::OpInBoundsPtrAccessChain:
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpConstantSampler:
    Reqs.addCapability(SPIRV::Capability::LiteralSampler);
    break;
  case SPIRV::OpTypeImage:
    addOpTypeImageReqs(MI, Reqs, ST);
    break;
  case SPIRV::OpTypeSampler:
    Reqs.addCapability(SPIRV::Capability::ImageBasic);
    break;
  case SPIRV::OpTypeForwardPointer:
    // TODO: check if it's OpenCL's kernel.
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicLoad:
  case SPIRV::OpAtomicStore:
  case SPIRV::OpAtomicExchange:
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicIIncrement:
  case SPIRV::OpAtomicIDecrement:
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicUMin:
  case SPIRV::OpAtomicUMax:
  case SPIRV::OpAtomicSMin:
  case SPIRV::OpAtomicSMax:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor: {
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    const MachineInstr *InstrPtr = &MI;
    if (MI.getOpcode() == SPIRV::OpAtomicStore) {
      assert(MI.getOperand(3).isReg());
      InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
      assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
    }
    assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
    Register TypeReg = InstrPtr->getOperand(1).getReg();
    SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
    if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
      unsigned BitWidth = TypeDef->getOperand(1).getImm();
      if (BitWidth == 64)
        Reqs.addCapability(SPIRV::Capability::Int64Atomics);
    }
    break;
  }
  case SPIRV::OpGroupNonUniformIAdd:
  case SPIRV::OpGroupNonUniformFAdd:
  case SPIRV::OpGroupNonUniformIMul:
  case SPIRV::OpGroupNonUniformFMul:
  case SPIRV::OpGroupNonUniformSMin:
  case SPIRV::OpGroupNonUniformUMin:
  case SPIRV::OpGroupNonUniformFMin:
  case SPIRV::OpGroupNonUniformSMax:
  case SPIRV::OpGroupNonUniformUMax:
  case SPIRV::OpGroupNonUniformFMax:
  case SPIRV::OpGroupNonUniformBitwiseAnd:
  case SPIRV::OpGroupNonUniformBitwiseOr:
  case SPIRV::OpGroupNonUniformBitwiseXor:
  case SPIRV::OpGroupNonUniformLogicalAnd:
  case SPIRV::OpGroupNonUniformLogicalOr:
  case SPIRV::OpGroupNonUniformLogicalXor: {
    assert(MI.getOperand(3).isImm());
    int64_t GroupOp = MI.getOperand(3).getImm();
    switch (GroupOp) {
    case SPIRV::GroupOperation::Reduce:
    case SPIRV::GroupOperation::InclusiveScan:
    case SPIRV::GroupOperation::ExclusiveScan:
      Reqs.addCapability(SPIRV::Capability::Kernel);
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
      break;
    case SPIRV::GroupOperation::ClusteredReduce:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
      break;
    case SPIRV::GroupOperation::PartitionedReduceNV:
    case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
    case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
      break;
    }
    break;
  }
  case SPIRV::OpGroupNonUniformShuffle:
  case SPIRV::OpGroupNonUniformShuffleXor:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
    break;
  case SPIRV::OpGroupNonUniformShuffleUp:
  case SPIRV::OpGroupNonUniformShuffleDown:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
    break;
  case SPIRV::OpGroupAll:
  case SPIRV::OpGroupAny:
  case SPIRV::OpGroupBroadcast:
  case SPIRV::OpGroupIAdd:
  case SPIRV::OpGroupFAdd:
  case SPIRV::OpGroupFMin:
  case SPIRV::OpGroupUMin:
  case SPIRV::OpGroupSMin:
  case SPIRV::OpGroupFMax:
  case SPIRV::OpGroupUMax:
  case SPIRV::OpGroupSMax:
    Reqs.addCapability(SPIRV::Capability::Groups);
    break;
  case SPIRV::OpGroupNonUniformElect:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
    break;
  case SPIRV::OpGroupNonUniformAll:
  case SPIRV::OpGroupNonUniformAny:
  case SPIRV::OpGroupNonUniformAllEqual:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
    break;
  case SPIRV::OpGroupNonUniformBroadcast:
  case SPIRV::OpGroupNonUniformBroadcastFirst:
  case SPIRV::OpGroupNonUniformBallot:
  case SPIRV::OpGroupNonUniformInverseBallot:
  case SPIRV::OpGroupNonUniformBallotBitExtract:
  case SPIRV::OpGroupNonUniformBallotBitCount:
  case SPIRV::OpGroupNonUniformBallotFindLSB:
  case SPIRV::OpGroupNonUniformBallotFindMSB:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
    break;
  default:
    break;
  }
}

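// Gather module-wide requirements: per-instruction requirements, execution
// modes from the "spirv.ExecutionMode" named metadata, and execution modes
// implied by per-function metadata such as reqd_work_group_size.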
static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
                        MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
  // Collect requirements for existing instructions.
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (const MachineBasicBlock &MBB : *MF)
      for (const MachineInstr &MI : MBB)
        addInstrRequirements(MI, MAI.Reqs, ST);
  }
  // Collect requirements for OpExecutionMode instructions.
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  if (Node) {
    for (unsigned i = 0; i < Node->getNumOperands(); i++) {
      MDNode *MDN = cast<MDNode>(Node->getOperand(i));
      const MDOperand &MDOp = MDN->getOperand(1);
      if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
        Constant *C = CMeta->getValue();
        if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
          auto EM = Const->getZExtValue();
          MAI.Reqs.getAndAddRequirements(
              SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
        }
      }
    }
  }
  for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
    const Function &F = *FI;
    if (F.isDeclaration())
      continue;
    if (F.getMetadata("reqd_work_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getMetadata("work_group_size_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSizeHint, ST);
    if (F.getMetadata("intel_reqd_sub_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::SubgroupSize, ST);
    if (F.getMetadata("vec_type_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::VecTypeHint, ST);
  }
}

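// Translate LLVM fast-math MI flags into a SPIR-V FPFastMathMode bitmask.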
static unsigned getFastMathFlags(const MachineInstr &I) {
  unsigned Flags = SPIRV::FPFastMathMode::None;
  if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
    Flags |= SPIRV::FPFastMathMode::NotNaN;
  if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
    Flags |= SPIRV::FPFastMathMode::NotInf;
  if (I.getFlag(MachineInstr::MIFlag::FmNsz))
    Flags |= SPIRV::FPFastMathMode::NSZ;
  if (I.getFlag(MachineInstr::MIFlag::FmArcp))
    Flags |= SPIRV::FPFastMathMode::AllowRecip;
  if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
    Flags |= SPIRV::FPFastMathMode::Fast;
  return Flags;
}

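// Emit NoSignedWrap/NoUnsignedWrap/FPFastMathMode decorations for an
// instruction's MI flags, provided the corresponding decoration's
// requirements can be satisfied for this target.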
static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
                                   const SPIRVInstrInfo &TII,
                                   SPIRV::RequirementHandler &Reqs) {
  if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoSignedWrap, ST, Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoSignedWrap, {});
  }
  if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoUnsignedWrap, ST,
                                     Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoUnsignedWrap, {});
  }
  if (!TII.canUseFastMathFlags(I))
    return;
  unsigned FMFlags = getFastMathFlags(I);
  if (FMFlags == SPIRV::FPFastMathMode::None)
    return;
  Register DstReg = I.getOperand(0).getReg();
  buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
}

// Walk all functions and add decorations related to MI flags.
static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
                           MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
                           SPIRV::ModuleAnalysisInfo &MAI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (auto &MBB : *MF)
      for (auto &MI : MBB)
        handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
  }
}

struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;

void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<MachineModuleInfoWrapperPass>();
}

bool SPIRVModuleAnalysis::runOnModule(Module &M) {
  SPIRVTargetMachine &TM =
      getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
  ST = TM.getSubtargetImpl();
  GR = ST->getSPIRVGlobalRegistry();
  TII = ST->getInstrInfo();

  MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();

  setBaseInfo(M);

  addDecorations(M, *TII, MMI, *ST, MAI);

  collectReqs(M, MAI, MMI, *ST);

  processSwitches(M, MAI, MMI);

  // Process type/const/global var/func decl instructions, number their
  // destination registers from 0 to N, collect Extensions and Capabilities.
  processDefInstrs(M);

  // Number the rest of the registers from N+1 onwards.
  numberRegistersGlobally(M);

  // Collect OpName, OpEntryPoint, OpDecorate etc., and process other
  // instructions.
  processOtherInstrs(M);

  // If there are no entry points, we need the Linkage capability.
  if (MAI.MS[SPIRV::MB_EntryPoints].empty())
    MAI.Reqs.addCapability(SPIRV::Capability::Linkage);

  return false;
}