xref: /llvm-project/llvm/lib/Target/SPIRV/SPIRVModuleAnalysis.cpp (revision 4b692a95d103f3ad30d6be1ce6d5dda0bd90bc1f)
1 //===- SPIRVModuleAnalysis.cpp - analysis of global instrs & regs - C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The analysis collects instructions that should be output at the module level
10 // and performs the global register numbering.
11 //
12 // The results of this analysis are used in AsmPrinter to rename registers
13 // globally and to output required instructions at the module level.
14 //
15 //===----------------------------------------------------------------------===//
16 
17 #include "SPIRVModuleAnalysis.h"
18 #include "MCTargetDesc/SPIRVBaseInfo.h"
19 #include "MCTargetDesc/SPIRVMCTargetDesc.h"
20 #include "SPIRV.h"
21 #include "SPIRVSubtarget.h"
22 #include "SPIRVTargetMachine.h"
23 #include "SPIRVUtils.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/CodeGen/MachineModuleInfo.h"
26 #include "llvm/CodeGen/TargetPassConfig.h"
27 
28 using namespace llvm;
29 
30 #define DEBUG_TYPE "spirv-module-analysis"
31 
32 static cl::opt<bool>
33     SPVDumpDeps("spv-dump-deps",
34                 cl::desc("Dump MIR with SPIR-V dependencies info"),
35                 cl::Optional, cl::init(false));
36 
37 static cl::list<SPIRV::Capability::Capability>
38     AvoidCapabilities("avoid-spirv-capabilities",
39                       cl::desc("SPIR-V capabilities to avoid if there are "
40                                "other options enabling a feature"),
41                       cl::ZeroOrMore, cl::Hidden,
42                       cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader",
43                                             "SPIR-V Shader capability")));
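// Example: passing -avoid-spirv-capabilities=Shader makes the analysis prefer
// other capabilities whenever a feature lists several enabling capabilities.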
44 // Use a set instead of cl::list to support a fast "contains" check.
45 struct AvoidCapabilitiesSet {
46   SmallSet<SPIRV::Capability::Capability, 4> S;
47   AvoidCapabilitiesSet() {
48     for (auto Cap : AvoidCapabilities)
49       S.insert(Cap);
50   }
51 };
52 
53 char llvm::SPIRVModuleAnalysis::ID = 0;
54 
55 namespace llvm {
56 void initializeSPIRVModuleAnalysisPass(PassRegistry &);
57 } // namespace llvm
58 
59 INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
60                 true)
61 
62 // Retrieve an unsigned from an MDNode with a list of them as operands.
63 static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
64                                 unsigned DefaultVal = 0) {
65   if (MdNode && OpIndex < MdNode->getNumOperands()) {
66     const auto &Op = MdNode->getOperand(OpIndex);
67     return mdconst::extract<ConstantInt>(Op)->getZExtValue();
68   }
69   return DefaultVal;
70 }
71 
72 static SPIRV::Requirements
73 getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
74                                unsigned i, const SPIRVSubtarget &ST,
75                                SPIRV::RequirementHandler &Reqs) {
76   static AvoidCapabilitiesSet
77       AvoidCaps; // contains capabilities to avoid if there is another option
78 
79   VersionTuple ReqMinVer = getSymbolicOperandMinVersion(Category, i);
80   VersionTuple ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
81   VersionTuple SPIRVVersion = ST.getSPIRVVersion();
82   bool MinVerOK = SPIRVVersion.empty() || SPIRVVersion >= ReqMinVer;
83   bool MaxVerOK =
84       ReqMaxVer.empty() || SPIRVVersion.empty() || SPIRVVersion <= ReqMaxVer;
85   CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
86   ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
87   if (ReqCaps.empty()) {
88     if (ReqExts.empty()) {
89       if (MinVerOK && MaxVerOK)
90         return {true, {}, {}, ReqMinVer, ReqMaxVer};
91       return {false, {}, {}, VersionTuple(), VersionTuple()};
92     }
93   } else if (MinVerOK && MaxVerOK) {
94     if (ReqCaps.size() == 1) {
95       auto Cap = ReqCaps[0];
96       if (Reqs.isCapabilityAvailable(Cap))
97         return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
98     } else {
99       // By SPIR-V specification: "If an instruction, enumerant, or other
100       // feature specifies multiple enabling capabilities, only one such
101       // capability needs to be declared to use the feature." However, one
102       // capability may be preferred over another. We use command line
103       // argument(s) and AvoidCapabilities to avoid selection of certain
104       // capabilities if there are other options.
105       CapabilityList UseCaps;
106       for (auto Cap : ReqCaps)
107         if (Reqs.isCapabilityAvailable(Cap))
108           UseCaps.push_back(Cap);
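          // Prefer the first available capability that is not listed in AvoidCaps;
          // if every candidate is listed there, fall back to the last one.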
109       for (size_t i = 0, Sz = UseCaps.size(); i < Sz; ++i) {
110         auto Cap = UseCaps[i];
111         if (i == Sz - 1 || !AvoidCaps.S.contains(Cap))
112           return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
113       }
114     }
115   }
116   // If there are no capabilities, or we can't satisfy the version or
117   // capability requirements, use the list of extensions (if the subtarget
118   // can handle them all).
119   if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
120         return ST.canUseExtension(Ext);
121       })) {
122     return {true,
123             {},
124             ReqExts,
125             VersionTuple(),
126             VersionTuple()}; // TODO: add versions to extensions.
127   }
128   return {false, {}, {}, VersionTuple(), VersionTuple()};
129 }
130 
131 void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
132   MAI.MaxID = 0;
133   for (int i = 0; i < SPIRV::NUM_MODULE_SECTIONS; i++)
134     MAI.MS[i].clear();
135   MAI.RegisterAliasTable.clear();
136   MAI.InstrsToDelete.clear();
137   MAI.FuncMap.clear();
138   MAI.GlobalVarList.clear();
139   MAI.ExtInstSetMap.clear();
140   MAI.Reqs.clear();
141   MAI.Reqs.initAvailableCapabilities(*ST);
142 
143   // TODO: determine the memory model and source language from the configuration.
144   if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
145     auto MemMD = MemModel->getOperand(0);
146     MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
147         getMetadataUInt(MemMD, 0));
148     MAI.Mem =
149         static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
150   } else {
151     // TODO: Add support for VulkanMemoryModel.
152     MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
153                                 : SPIRV::MemoryModel::GLSL450;
154     if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
155       unsigned PtrSize = ST->getPointerSize();
156       MAI.Addr = PtrSize == 32   ? SPIRV::AddressingModel::Physical32
157                  : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
158                                  : SPIRV::AddressingModel::Logical;
159     } else {
160       // TODO: Add support for PhysicalStorageBufferAddress.
161       MAI.Addr = SPIRV::AddressingModel::Logical;
162     }
163   }
164   // Get the OpenCL version number from metadata.
165   // TODO: support other source languages.
166   if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
167     MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
168     // Construct version literal in accordance with SPIRV-LLVM-Translator.
169     // TODO: support multiple OCL version metadata.
170     assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
171     auto VersionMD = VerNode->getOperand(0);
172     unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
173     unsigned MinorNum = getMetadataUInt(VersionMD, 1);
174     unsigned RevNum = getMetadataUInt(VersionMD, 2);
175     // Prevent the Major part of the OpenCL version from being 0.
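        // For example, OpenCL 2.0 with revision 0 is encoded as
        // (2 * 100 + 0) * 1000 + 0 = 200000.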
176     MAI.SrcLangVersion =
177         (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
178   } else {
179     // If there is no information about the OpenCL version, generate OpenCL 1.0 by
180     // default for the OpenCL environment to avoid puzzling runtimes with an
181     // Unknown/0.0 version output. For reference, the LLVM-SPIRV Translator avoids
182     // potential issues with runtimes in a similar manner.
183     if (ST->isOpenCLEnv()) {
184       MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
185       MAI.SrcLangVersion = 100000;
186     } else {
187       MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
188       MAI.SrcLangVersion = 0;
189     }
190   }
191 
192   if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
193     for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
194       MDNode *MD = ExtNode->getOperand(I);
195       if (!MD || MD->getNumOperands() == 0)
196         continue;
197       for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
198         MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
199     }
200   }
201 
202   // Update required capabilities for this memory model, addressing model and
203   // source language.
204   MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
205                                  MAI.Mem, *ST);
206   MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
207                                  MAI.SrcLang, *ST);
208   MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
209                                  MAI.Addr, *ST);
210 
211   if (ST->isOpenCLEnv()) {
212     // TODO: check if it's required by default.
213     MAI.ExtInstSetMap[static_cast<unsigned>(
214         SPIRV::InstructionSet::OpenCL_std)] =
215         Register::index2VirtReg(MAI.getNextID());
216   }
217 }
218 
219 // Returns a representation of an instruction as a vector of MachineOperand
220 // hash values, see llvm::hash_value(const MachineOperand &MO) for details.
221 // This creates a signature of the instruction with the same content
222 // that MachineOperand::isIdenticalTo uses for comparison.
223 static InstrSignature instrToSignature(const MachineInstr &MI,
224                                        SPIRV::ModuleAnalysisInfo &MAI,
225                                        bool UseDefReg) {
226   InstrSignature Signature{MI.getOpcode()};
227   for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
228     const MachineOperand &MO = MI.getOperand(i);
229     size_t h;
230     if (MO.isReg()) {
231       if (!UseDefReg && MO.isDef())
232         continue;
233       Register RegAlias = MAI.getRegisterAlias(MI.getMF(), MO.getReg());
234       if (!RegAlias.isValid()) {
235         LLVM_DEBUG({
236           dbgs() << "Unexpectedly, no global id found for the operand ";
237           MO.print(dbgs());
238           dbgs() << "\nInstruction: ";
239           MI.print(dbgs());
240           dbgs() << "\n";
241         });
242         report_fatal_error("All v-regs must have been mapped to global id's");
243       }
244       // mimic llvm::hash_value(const MachineOperand &MO)
245       h = hash_combine(MO.getType(), (unsigned)RegAlias, MO.getSubReg(),
246                        MO.isDef());
247     } else {
248       h = hash_value(MO);
249     }
250     Signature.push_back(h);
251   }
252   return Signature;
253 }
254 
255 bool SPIRVModuleAnalysis::isDeclSection(const MachineRegisterInfo &MRI,
256                                         const MachineInstr &MI) {
257   unsigned Opcode = MI.getOpcode();
258   switch (Opcode) {
259   case SPIRV::OpTypeForwardPointer:
260     // omit now, collect later
261     return false;
262   case SPIRV::OpVariable:
263     return static_cast<SPIRV::StorageClass::StorageClass>(
264                MI.getOperand(2).getImm()) != SPIRV::StorageClass::Function;
265   case SPIRV::OpFunction:
266   case SPIRV::OpFunctionParameter:
267     return true;
268   }
269   if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
270     Register DefReg = MI.getOperand(0).getReg();
271     for (MachineInstr &UseMI : MRI.use_instructions(DefReg)) {
272       if (UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
273         continue;
274       // This is a dummy definition: the function-pointer constant refers to a
275       // function and is resolved elsewhere, so skip this definition.
276       assert(UseMI.getOperand(2).isReg() &&
277              UseMI.getOperand(2).getReg() == DefReg);
278       MAI.setSkipEmission(&MI);
279       return false;
280     }
281   }
282   return TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
283          TII->isInlineAsmDefInstr(MI);
284 }
285 
286 // This is a special case of a function pointer referring to a possibly
287 // forward function declaration. The operand is a dummy OpUndef that
288 // requires special treatment.
289 void SPIRVModuleAnalysis::visitFunPtrUse(
290     Register OpReg, InstrGRegsMap &SignatureToGReg,
291     std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
292     const MachineInstr &MI) {
293   const MachineOperand *OpFunDef =
294       GR->getFunctionDefinitionByUse(&MI.getOperand(2));
295   assert(OpFunDef && OpFunDef->isReg());
296   // find the actual function definition and number it globally in advance
297   const MachineInstr *OpDefMI = OpFunDef->getParent();
298   assert(OpDefMI && OpDefMI->getOpcode() == SPIRV::OpFunction);
299   const MachineFunction *FunDefMF = OpDefMI->getParent()->getParent();
300   const MachineRegisterInfo &FunDefMRI = FunDefMF->getRegInfo();
301   do {
302     visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
303     OpDefMI = OpDefMI->getNextNode();
304   } while (OpDefMI && (OpDefMI->getOpcode() == SPIRV::OpFunction ||
305                        OpDefMI->getOpcode() == SPIRV::OpFunctionParameter));
306   // associate the function pointer with the newly assigned global number
307   Register GlobalFunDefReg = MAI.getRegisterAlias(FunDefMF, OpFunDef->getReg());
308   assert(GlobalFunDefReg.isValid() &&
309          "Function definition must refer to a global register");
310   MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
311 }
312 
313 // Depth-first recursive traversal of dependencies. Repeated visits are guarded
314 // by MAI.hasRegisterAlias().
315 void SPIRVModuleAnalysis::visitDecl(
316     const MachineRegisterInfo &MRI, InstrGRegsMap &SignatureToGReg,
317     std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
318     const MachineInstr &MI) {
319   unsigned Opcode = MI.getOpcode();
320   DenseSet<Register> Deps;
321 
322   // Process each operand of the instruction to resolve dependencies
323   for (const MachineOperand &MO : MI.operands()) {
324     if (!MO.isReg() || MO.isDef())
325       continue;
326     Register OpReg = MO.getReg();
327     // Handle function pointers special case
328     if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
329         MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
330       visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF, MI);
331       continue;
332     }
333     // Skip already processed instructions
334     if (MAI.hasRegisterAlias(MF, MO.getReg()))
335       continue;
336     // Recursively visit dependencies
337     if (const MachineInstr *OpDefMI = MRI.getUniqueVRegDef(OpReg)) {
338       if (isDeclSection(MRI, *OpDefMI))
339         visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
340       continue;
341     }
342     // Handle the unexpected case of no unique definition for the SPIR-V
343     // instruction
344     LLVM_DEBUG({
345       dbgs() << "Unexpectedly, no unique definition for the operand ";
346       MO.print(dbgs());
347       dbgs() << "\nInstruction: ";
348       MI.print(dbgs());
349       dbgs() << "\n";
350     });
351     report_fatal_error(
352         "No unique definition is found for the virtual register");
353   }
354 
355   Register GReg;
356   bool IsFunDef = false;
357   if (TII->isSpecConstantInstr(MI)) {
358     GReg = Register::index2VirtReg(MAI.getNextID());
359     MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
360   } else if (Opcode == SPIRV::OpFunction ||
361              Opcode == SPIRV::OpFunctionParameter) {
362     GReg = handleFunctionOrParameter(MF, MI, GlobalToGReg, IsFunDef);
363   } else if (TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
364              TII->isInlineAsmDefInstr(MI)) {
365     GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
366   } else if (Opcode == SPIRV::OpVariable) {
367     GReg = handleVariable(MF, MI, GlobalToGReg);
368   } else {
369     LLVM_DEBUG({
370       dbgs() << "\nInstruction: ";
371       MI.print(dbgs());
372       dbgs() << "\n";
373     });
374     llvm_unreachable("Unexpected instruction is visited");
375   }
376   MAI.setRegisterAlias(MF, MI.getOperand(0).getReg(), GReg);
377   if (!IsFunDef)
378     MAI.setSkipEmission(&MI);
379 }
380 
381 Register SPIRVModuleAnalysis::handleFunctionOrParameter(
382     const MachineFunction *MF, const MachineInstr &MI,
383     std::map<const Value *, unsigned> &GlobalToGReg, bool &IsFunDef) {
384   const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
385   assert(GObj && "Unregistered global definition");
386   const Function *F = dyn_cast<Function>(GObj);
387   if (!F)
388     F = dyn_cast<Argument>(GObj)->getParent();
389   assert(F && "Expected a reference to a function or an argument");
390   IsFunDef = !F->isDeclaration();
391   auto It = GlobalToGReg.find(GObj);
392   if (It != GlobalToGReg.end())
393     return It->second;
394   Register GReg = Register::index2VirtReg(MAI.getNextID());
395   GlobalToGReg[GObj] = GReg;
396   if (!IsFunDef)
397     MAI.MS[SPIRV::MB_ExtFuncDecls].push_back(&MI);
398   return GReg;
399 }
400 
401 Register
402 SPIRVModuleAnalysis::handleTypeDeclOrConstant(const MachineInstr &MI,
403                                               InstrGRegsMap &SignatureToGReg) {
404   InstrSignature MISign = instrToSignature(MI, MAI, false);
405   auto It = SignatureToGReg.find(MISign);
406   if (It != SignatureToGReg.end())
407     return It->second;
408   Register GReg = Register::index2VirtReg(MAI.getNextID());
409   SignatureToGReg[MISign] = GReg;
410   MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
411   return GReg;
412 }
413 
414 Register SPIRVModuleAnalysis::handleVariable(
415     const MachineFunction *MF, const MachineInstr &MI,
416     std::map<const Value *, unsigned> &GlobalToGReg) {
417   MAI.GlobalVarList.push_back(&MI);
418   const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
419   assert(GObj && "Unregistered global definition");
420   auto It = GlobalToGReg.find(GObj);
421   if (It != GlobalToGReg.end())
422     return It->second;
423   Register GReg = Register::index2VirtReg(MAI.getNextID());
424   GlobalToGReg[GObj] = GReg;
425   MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
426   return GReg;
427 }
428 
429 void SPIRVModuleAnalysis::collectDeclarations(const Module &M) {
430   InstrGRegsMap SignatureToGReg;
431   std::map<const Value *, unsigned> GlobalToGReg;
432   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
433     MachineFunction *MF = MMI->getMachineFunction(*F);
434     if (!MF)
435       continue;
436     const MachineRegisterInfo &MRI = MF->getRegInfo();
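        // PastHeader tracks the position relative to the function header:
        // 0 - OpFunction has not been seen yet, 1 - inside the header
        // (OpFunction and its OpFunctionParameters), 2 - past the header.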
437     unsigned PastHeader = 0;
438     for (MachineBasicBlock &MBB : *MF) {
439       for (MachineInstr &MI : MBB) {
440         if (MI.getNumOperands() == 0)
441           continue;
442         unsigned Opcode = MI.getOpcode();
443         if (Opcode == SPIRV::OpFunction) {
444           if (PastHeader == 0) {
445             PastHeader = 1;
446             continue;
447           }
448         } else if (Opcode == SPIRV::OpFunctionParameter) {
449           if (PastHeader < 2)
450             continue;
451         } else if (PastHeader > 0) {
452           PastHeader = 2;
453         }
454 
455         const MachineOperand &DefMO = MI.getOperand(0);
456         switch (Opcode) {
457         case SPIRV::OpExtension:
458           MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.getImm()));
459           MAI.setSkipEmission(&MI);
460           break;
461         case SPIRV::OpCapability:
462           MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.getImm()));
463           MAI.setSkipEmission(&MI);
464           if (PastHeader > 0)
465             PastHeader = 2;
466           break;
467         default:
468           if (DefMO.isReg() && isDeclSection(MRI, MI) &&
469               !MAI.hasRegisterAlias(MF, DefMO.getReg()))
470             visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, MI);
471         }
472       }
473     }
474   }
475 }
476 
477 // Look for IDs declared with Import linkage, and map the corresponding function
478 // to the register defining that ID (which will usually be the result of
479 // an OpFunction). This lets us call externally imported functions using
480 // the correct ID registers.
481 void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
482                                            const Function *F) {
483   if (MI.getOpcode() == SPIRV::OpDecorate) {
484     // Check whether this decoration carries Import linkage.
485     auto Dec = MI.getOperand(1).getImm();
486     if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
487       auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
488       if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
489         // Map imported function name to function ID register.
490         const Function *ImportedFunc =
491             F->getParent()->getFunction(getStringImm(MI, 2));
492         Register Target = MI.getOperand(0).getReg();
493         MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
494       }
495     }
496   } else if (MI.getOpcode() == SPIRV::OpFunction) {
497     // Record all internal OpFunction declarations.
498     Register Reg = MI.defs().begin()->getReg();
499     Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
500     assert(GlobalReg.isValid());
501     MAI.FuncMap[F] = GlobalReg;
502   }
503 }
504 
505 // Collect the given instruction in the specified MS. We assume global register
506 // numbering has already occurred by this point. We can directly compare reg
507 // arguments when detecting duplicates.
508 static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
509                               SPIRV::ModuleSectionType MSType, InstrTraces &IS,
510                               bool Append = true) {
511   MAI.setSkipEmission(&MI);
512   InstrSignature MISign = instrToSignature(MI, MAI, true);
513   auto FoundMI = IS.insert(MISign);
514   if (!FoundMI.second)
515     return; // insert failed, so we found a duplicate; don't add it to MAI.MS
516   // No duplicates, so add it.
517   if (Append)
518     MAI.MS[MSType].push_back(&MI);
519   else
520     MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
521 }
522 
523 // Some global instructions make reference to function-local ID regs, so cannot
524 // be correctly collected until these registers are globally numbered.
525 void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
526   InstrTraces IS;
527   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
528     if ((*F).isDeclaration())
529       continue;
530     MachineFunction *MF = MMI->getMachineFunction(*F);
531     assert(MF);
532 
533     for (MachineBasicBlock &MBB : *MF)
534       for (MachineInstr &MI : MBB) {
535         if (MAI.getSkipEmission(&MI))
536           continue;
537         const unsigned OpCode = MI.getOpcode();
538         if (OpCode == SPIRV::OpString) {
539           collectOtherInstr(MI, MAI, SPIRV::MB_DebugStrings, IS);
540         } else if (OpCode == SPIRV::OpExtInst && MI.getOperand(2).isImm() &&
541                    MI.getOperand(2).getImm() ==
542                        SPIRV::InstructionSet::
543                            NonSemantic_Shader_DebugInfo_100) {
544           MachineOperand Ins = MI.getOperand(3);
545           namespace NS = SPIRV::NonSemanticExtInst;
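              // Only these DebugInfo instructions are emitted at the module level and
              // are collected into the MB_NonSemanticGlobalDI section.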
546           static constexpr int64_t GlobalNonSemanticDITy[] = {
547               NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
548               NS::DebugTypeBasic, NS::DebugTypePointer};
549           bool IsGlobalDI = false;
550           for (unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
551             IsGlobalDI |= Ins.getImm() == GlobalNonSemanticDITy[Idx];
552           if (IsGlobalDI)
553             collectOtherInstr(MI, MAI, SPIRV::MB_NonSemanticGlobalDI, IS);
554         } else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
555           collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames, IS);
556         } else if (OpCode == SPIRV::OpEntryPoint) {
557           collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints, IS);
558         } else if (TII->isDecorationInstr(MI)) {
559           collectOtherInstr(MI, MAI, SPIRV::MB_Annotations, IS);
560           collectFuncNames(MI, &*F);
561         } else if (TII->isConstantInstr(MI)) {
562           // OpSpecConstant* instructions are currently not in DT,
563           // but they still need to be collected.
564           collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS);
565         } else if (OpCode == SPIRV::OpFunction) {
566           collectFuncNames(MI, &*F);
567         } else if (OpCode == SPIRV::OpTypeForwardPointer) {
568           collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS, false);
569         }
570       }
571   }
572 }
573 
574 // Number registers in all functions globally from 0 onwards and store
575 // the result in the global register alias table. Some registers are already
576 // numbered.
577 void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
578   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
579     if ((*F).isDeclaration())
580       continue;
581     MachineFunction *MF = MMI->getMachineFunction(*F);
582     assert(MF);
583     for (MachineBasicBlock &MBB : *MF) {
584       for (MachineInstr &MI : MBB) {
585         for (MachineOperand &Op : MI.operands()) {
586           if (!Op.isReg())
587             continue;
588           Register Reg = Op.getReg();
589           if (MAI.hasRegisterAlias(MF, Reg))
590             continue;
591           Register NewReg = Register::index2VirtReg(MAI.getNextID());
592           MAI.setRegisterAlias(MF, Reg, NewReg);
593         }
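            // Assign a global id (once) to each extended instruction set referenced
            // by OpExtInst; operand 2 holds the set as an immediate.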
594         if (MI.getOpcode() != SPIRV::OpExtInst)
595           continue;
596         auto Set = MI.getOperand(2).getImm();
597         if (!MAI.ExtInstSetMap.contains(Set))
598           MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
599       }
600     }
601   }
602 }
603 
604 // RequirementHandler implementations.
605 void SPIRV::RequirementHandler::getAndAddRequirements(
606     SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
607     const SPIRVSubtarget &ST) {
608   addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
609 }
610 
611 void SPIRV::RequirementHandler::recursiveAddCapabilities(
612     const CapabilityList &ToPrune) {
613   for (const auto &Cap : ToPrune) {
614     AllCaps.insert(Cap);
615     CapabilityList ImplicitDecls =
616         getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
617     recursiveAddCapabilities(ImplicitDecls);
618   }
619 }
620 
621 void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
622   for (const auto &Cap : ToAdd) {
623     bool IsNewlyInserted = AllCaps.insert(Cap).second;
624     if (!IsNewlyInserted) // Don't re-add if it's already been declared.
625       continue;
626     CapabilityList ImplicitDecls =
627         getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
628     recursiveAddCapabilities(ImplicitDecls);
629     MinimalCaps.push_back(Cap);
630   }
631 }
632 
633 void SPIRV::RequirementHandler::addRequirements(
634     const SPIRV::Requirements &Req) {
635   if (!Req.IsSatisfiable)
636     report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");
637 
638   if (Req.Cap.has_value())
639     addCapabilities({Req.Cap.value()});
640 
641   addExtensions(Req.Exts);
642 
643   if (!Req.MinVer.empty()) {
644     if (!MaxVersion.empty() && Req.MinVer > MaxVersion) {
645       LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
646                         << " and <= " << MaxVersion << "\n");
647       report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
648     }
649 
650     if (MinVersion.empty() || Req.MinVer > MinVersion)
651       MinVersion = Req.MinVer;
652   }
653 
654   if (!Req.MaxVer.empty()) {
655     if (!MinVersion.empty() && Req.MaxVer < MinVersion) {
656       LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
657                         << " and >= " << MinVersion << "\n");
658       report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
659     }
660 
661     if (MaxVersion.empty() || Req.MaxVer < MaxVersion)
662       MaxVersion = Req.MaxVer;
663   }
664 }
665 
666 void SPIRV::RequirementHandler::checkSatisfiable(
667     const SPIRVSubtarget &ST) const {
668   // Report as many errors as possible before aborting the compilation.
669   bool IsSatisfiable = true;
670   auto TargetVer = ST.getSPIRVVersion();
671 
672   if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
673     LLVM_DEBUG(
674         dbgs() << "Target SPIR-V version too high for required features\n"
675                << "Required max version: " << MaxVersion << " target version "
676                << TargetVer << "\n");
677     IsSatisfiable = false;
678   }
679 
680   if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
681     LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
682                       << "Required min version: " << MinVersion
683                       << " target version " << TargetVer << "\n");
684     IsSatisfiable = false;
685   }
686 
687   if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
688     LLVM_DEBUG(
689         dbgs()
690         << "Version is too low for some features and too high for others.\n"
691         << "Required SPIR-V min version: " << MinVersion
692         << " required SPIR-V max version " << MaxVersion << "\n");
693     IsSatisfiable = false;
694   }
695 
696   for (auto Cap : MinimalCaps) {
697     if (AvailableCaps.contains(Cap))
698       continue;
699     LLVM_DEBUG(dbgs() << "Capability not supported: "
700                       << getSymbolicOperandMnemonic(
701                              OperandCategory::CapabilityOperand, Cap)
702                       << "\n");
703     IsSatisfiable = false;
704   }
705 
706   for (auto Ext : AllExtensions) {
707     if (ST.canUseExtension(Ext))
708       continue;
709     LLVM_DEBUG(dbgs() << "Extension not supported: "
710                       << getSymbolicOperandMnemonic(
711                              OperandCategory::ExtensionOperand, Ext)
712                       << "\n");
713     IsSatisfiable = false;
714   }
715 
716   if (!IsSatisfiable)
717     report_fatal_error("Unable to meet SPIR-V requirements for this target.");
718 }
719 
720 // Add the given capabilities and all their implicitly defined capabilities too.
721 void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
722   for (const auto Cap : ToAdd)
723     if (AvailableCaps.insert(Cap).second)
724       addAvailableCaps(getSymbolicOperandCapabilities(
725           SPIRV::OperandCategory::CapabilityOperand, Cap));
726 }
727 
728 void SPIRV::RequirementHandler::removeCapabilityIf(
729     const Capability::Capability ToRemove,
730     const Capability::Capability IfPresent) {
731   if (AllCaps.contains(IfPresent))
732     AllCaps.erase(ToRemove);
733 }
734 
735 namespace llvm {
736 namespace SPIRV {
737 void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
738   // Provided by all supported Vulkan versions and by OpenCL.
739   addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
740                     Capability::Int16});
741 
742   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
743     addAvailableCaps({Capability::GroupNonUniform,
744                       Capability::GroupNonUniformVote,
745                       Capability::GroupNonUniformArithmetic,
746                       Capability::GroupNonUniformBallot,
747                       Capability::GroupNonUniformClustered,
748                       Capability::GroupNonUniformShuffle,
749                       Capability::GroupNonUniformShuffleRelative});
750 
751   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
752     addAvailableCaps({Capability::DotProduct, Capability::DotProductInputAll,
753                       Capability::DotProductInput4x8Bit,
754                       Capability::DotProductInput4x8BitPacked,
755                       Capability::DemoteToHelperInvocation});
756 
757   // Add capabilities enabled by extensions.
758   for (auto Extension : ST.getAllAvailableExtensions()) {
759     CapabilityList EnabledCapabilities =
760         getCapabilitiesEnabledByExtension(Extension);
761     addAvailableCaps(EnabledCapabilities);
762   }
763 
764   if (ST.isOpenCLEnv()) {
765     initAvailableCapabilitiesForOpenCL(ST);
766     return;
767   }
768 
769   if (ST.isVulkanEnv()) {
770     initAvailableCapabilitiesForVulkan(ST);
771     return;
772   }
773 
774   report_fatal_error("Unimplemented environment for SPIR-V generation.");
775 }
776 
777 void RequirementHandler::initAvailableCapabilitiesForOpenCL(
778     const SPIRVSubtarget &ST) {
779   // Add the min requirements for different OpenCL and SPIR-V versions.
780   addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
781                     Capability::Kernel, Capability::Vector16,
782                     Capability::Groups, Capability::GenericPointer,
783                     Capability::StorageImageWriteWithoutFormat,
784                     Capability::StorageImageReadWithoutFormat});
785   if (ST.hasOpenCLFullProfile())
786     addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
787   if (ST.hasOpenCLImageSupport()) {
788     addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
789                       Capability::Image1D, Capability::SampledBuffer,
790                       Capability::ImageBuffer});
791     if (ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
792       addAvailableCaps({Capability::ImageReadWrite});
793   }
794   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
795       ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
796     addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
797   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
798     addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
799                       Capability::SignedZeroInfNanPreserve,
800                       Capability::RoundingModeRTE,
801                       Capability::RoundingModeRTZ});
802   // TODO: verify if this needs some checks.
803   addAvailableCaps({Capability::Float16, Capability::Float64});
804 
805   // TODO: add OpenCL extensions.
806 }
807 
808 void RequirementHandler::initAvailableCapabilitiesForVulkan(
809     const SPIRVSubtarget &ST) {
810 
811   // Core in Vulkan 1.1 and earlier.
812   addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
813                     Capability::GroupNonUniform, Capability::Image1D,
814                     Capability::SampledBuffer, Capability::ImageBuffer,
815                     Capability::UniformBufferArrayDynamicIndexing,
816                     Capability::SampledImageArrayDynamicIndexing,
817                     Capability::StorageBufferArrayDynamicIndexing,
818                     Capability::StorageImageArrayDynamicIndexing});
819 
820   // Became core in Vulkan 1.2
821   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
822     addAvailableCaps(
823         {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
824          Capability::InputAttachmentArrayDynamicIndexingEXT,
825          Capability::UniformTexelBufferArrayDynamicIndexingEXT,
826          Capability::StorageTexelBufferArrayDynamicIndexingEXT,
827          Capability::UniformBufferArrayNonUniformIndexingEXT,
828          Capability::SampledImageArrayNonUniformIndexingEXT,
829          Capability::StorageBufferArrayNonUniformIndexingEXT,
830          Capability::StorageImageArrayNonUniformIndexingEXT,
831          Capability::InputAttachmentArrayNonUniformIndexingEXT,
832          Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
833          Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
834   }
835 
836   // Became core in Vulkan 1.3
837   if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
838     addAvailableCaps({Capability::StorageImageWriteWithoutFormat,
839                       Capability::StorageImageReadWithoutFormat});
840 }
841 
842 } // namespace SPIRV
843 } // namespace llvm
844 
845 // Add the required capabilities from a decoration instruction (including
846 // BuiltIns).
847 static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
848                               SPIRV::RequirementHandler &Reqs,
849                               const SPIRVSubtarget &ST) {
850   int64_t DecOp = MI.getOperand(DecIndex).getImm();
851   auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
852   Reqs.addRequirements(getSymbolicOperandRequirements(
853       SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
854 
855   if (Dec == SPIRV::Decoration::BuiltIn) {
856     int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
857     auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
858     Reqs.addRequirements(getSymbolicOperandRequirements(
859         SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
860   } else if (Dec == SPIRV::Decoration::LinkageAttributes) {
861     int64_t LinkageOp = MI.getOperand(MI.getNumOperands() - 1).getImm();
862     SPIRV::LinkageType::LinkageType LnkType =
863         static_cast<SPIRV::LinkageType::LinkageType>(LinkageOp);
864     if (LnkType == SPIRV::LinkageType::LinkOnceODR)
865       Reqs.addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
866   } else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
867              Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
868     Reqs.addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
869   } else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
870     Reqs.addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
871   } else if (Dec == SPIRV::Decoration::InitModeINTEL ||
872              Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
873     Reqs.addExtension(
874         SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
875   } else if (Dec == SPIRV::Decoration::NonUniformEXT) {
876     Reqs.addRequirements(SPIRV::Capability::ShaderNonUniformEXT);
877   }
878 }
879 
880 // Add requirements for image handling.
881 static void addOpTypeImageReqs(const MachineInstr &MI,
882                                SPIRV::RequirementHandler &Reqs,
883                                const SPIRVSubtarget &ST) {
884   assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
885   // The operand indices used here are based on the OpTypeImage layout, which
886   // the MachineInstr follows as well.
887   int64_t ImgFormatOp = MI.getOperand(7).getImm();
888   auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
889   Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
890                              ImgFormat, ST);
891 
892   bool IsArrayed = MI.getOperand(4).getImm() == 1;
893   bool IsMultisampled = MI.getOperand(5).getImm() == 1;
894   bool NoSampler = MI.getOperand(6).getImm() == 2;
895   // Add dimension requirements.
896   assert(MI.getOperand(2).isImm());
897   switch (MI.getOperand(2).getImm()) {
898   case SPIRV::Dim::DIM_1D:
899     Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
900                                    : SPIRV::Capability::Sampled1D);
901     break;
902   case SPIRV::Dim::DIM_2D:
903     if (IsMultisampled && NoSampler)
904       Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
905     break;
906   case SPIRV::Dim::DIM_Cube:
907     Reqs.addRequirements(SPIRV::Capability::Shader);
908     if (IsArrayed)
909       Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
910                                      : SPIRV::Capability::SampledCubeArray);
911     break;
912   case SPIRV::Dim::DIM_Rect:
913     Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
914                                    : SPIRV::Capability::SampledRect);
915     break;
916   case SPIRV::Dim::DIM_Buffer:
917     Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
918                                    : SPIRV::Capability::SampledBuffer);
919     break;
920   case SPIRV::Dim::DIM_SubpassData:
921     Reqs.addRequirements(SPIRV::Capability::InputAttachment);
922     break;
923   }
924 
925   // Has optional access qualifier.
926   if (ST.isOpenCLEnv()) {
927     if (MI.getNumOperands() > 8 &&
928         MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
929       Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
930     else
931       Reqs.addRequirements(SPIRV::Capability::ImageBasic);
932   }
933 }
934 
935 // Add requirements for handling atomic float instructions
936 #define ATOM_FLT_REQ_EXT_MSG(ExtName)                                          \
937   "The atomic float instruction requires the following SPIR-V "                \
938   "extension: SPV_EXT_shader_atomic_float" ExtName
939 static void AddAtomicFloatRequirements(const MachineInstr &MI,
940                                        SPIRV::RequirementHandler &Reqs,
941                                        const SPIRVSubtarget &ST) {
942   assert(MI.getOperand(1).isReg() &&
943          "Expect register operand in atomic float instruction");
944   Register TypeReg = MI.getOperand(1).getReg();
945   SPIRVType *TypeDef = MI.getMF()->getRegInfo().getVRegDef(TypeReg);
946   if (TypeDef->getOpcode() != SPIRV::OpTypeFloat)
947     report_fatal_error("Result type of an atomic float instruction must be a "
948                        "floating-point type scalar");
949 
950   unsigned BitWidth = TypeDef->getOperand(1).getImm();
951   unsigned Op = MI.getOpcode();
952   if (Op == SPIRV::OpAtomicFAddEXT) {
953     if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
954       report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_add"), false);
955     Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
956     switch (BitWidth) {
957     case 16:
958       if (!ST.canUseExtension(
959               SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
960         report_fatal_error(ATOM_FLT_REQ_EXT_MSG("16_add"), false);
961       Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
962       Reqs.addCapability(SPIRV::Capability::AtomicFloat16AddEXT);
963       break;
964     case 32:
965       Reqs.addCapability(SPIRV::Capability::AtomicFloat32AddEXT);
966       break;
967     case 64:
968       Reqs.addCapability(SPIRV::Capability::AtomicFloat64AddEXT);
969       break;
970     default:
971       report_fatal_error(
972           "Unexpected floating-point type width in atomic float instruction");
973     }
974   } else {
975     if (!ST.canUseExtension(
976             SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
977       report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_min_max"), false);
978     Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
979     switch (BitWidth) {
980     case 16:
981       Reqs.addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
982       break;
983     case 32:
984       Reqs.addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
985       break;
986     case 64:
987       Reqs.addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
988       break;
989     default:
990       report_fatal_error(
991           "Unexpected floating-point type width in atomic float instruction");
992     }
993   }
994 }
995 
996 bool isUniformTexelBuffer(MachineInstr *ImageInst) {
997   if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
998     return false;
999   uint32_t Dim = ImageInst->getOperand(2).getImm();
1000   uint32_t Sampled = ImageInst->getOperand(6).getImm();
1001   return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1002 }
1003 
1004 bool isStorageTexelBuffer(MachineInstr *ImageInst) {
1005   if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1006     return false;
1007   uint32_t Dim = ImageInst->getOperand(2).getImm();
1008   uint32_t Sampled = ImageInst->getOperand(6).getImm();
1009   return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1010 }
1011 
1012 bool isSampledImage(MachineInstr *ImageInst) {
1013   if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1014     return false;
1015   uint32_t Dim = ImageInst->getOperand(2).getImm();
1016   uint32_t Sampled = ImageInst->getOperand(6).getImm();
1017   return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1018 }
1019 
1020 bool isInputAttachment(MachineInstr *ImageInst) {
1021   if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1022     return false;
1023   uint32_t Dim = ImageInst->getOperand(2).getImm();
1024   uint32_t Sampled = ImageInst->getOperand(6).getImm();
1025   return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1026 }
1027 
1028 bool isStorageImage(MachineInstr *ImageInst) {
1029   if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1030     return false;
1031   uint32_t Dim = ImageInst->getOperand(2).getImm();
1032   uint32_t Sampled = ImageInst->getOperand(6).getImm();
1033   return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
1034 }
1035 
1036 bool isCombinedImageSampler(MachineInstr *SampledImageInst) {
1037   if (SampledImageInst->getOpcode() != SPIRV::OpTypeSampledImage)
1038     return false;
1039 
1040   const MachineRegisterInfo &MRI = SampledImageInst->getMF()->getRegInfo();
1041   Register ImageReg = SampledImageInst->getOperand(1).getReg();
1042   auto *ImageInst = MRI.getUniqueVRegDef(ImageReg);
1043   return isSampledImage(ImageInst);
1044 }
1045 
1046 bool hasNonUniformDecoration(Register Reg, const MachineRegisterInfo &MRI) {
1047   for (const auto &MI : MRI.reg_instructions(Reg)) {
1048     if (MI.getOpcode() != SPIRV::OpDecorate)
1049       continue;
1050 
1051     uint32_t Dec = MI.getOperand(1).getImm();
1052     if (Dec == SPIRV::Decoration::NonUniformEXT)
1053       return true;
1054   }
1055   return false;
1056 }
1057 
1058 void addOpAccessChainReqs(const MachineInstr &Instr,
1059                           SPIRV::RequirementHandler &Handler,
1060                           const SPIRVSubtarget &Subtarget) {
1061   const MachineRegisterInfo &MRI = Instr.getMF()->getRegInfo();
1062   // Get the result type. If it is an image type, then the shader uses
1063   // descriptor indexing. The appropriate capabilities will be added based
1064   // on the specifics of the image.
1065   Register ResTypeReg = Instr.getOperand(1).getReg();
1066   MachineInstr *ResTypeInst = MRI.getUniqueVRegDef(ResTypeReg);
1067 
1068   assert(ResTypeInst->getOpcode() == SPIRV::OpTypePointer);
1069   uint32_t StorageClass = ResTypeInst->getOperand(1).getImm();
1070   if (StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1071       StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1072       StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1073     return;
1074   }
1075 
1076   Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
1077   MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
1078   if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
1079       PointeeType->getOpcode() != SPIRV::OpTypeSampledImage &&
1080       PointeeType->getOpcode() != SPIRV::OpTypeSampler) {
1081     return;
1082   }
1083 
1084   bool IsNonUniform =
1085       hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
1086   if (isUniformTexelBuffer(PointeeType)) {
1087     if (IsNonUniform)
1088       Handler.addRequirements(
1089           SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1090     else
1091       Handler.addRequirements(
1092           SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1093   } else if (isInputAttachment(PointeeType)) {
1094     if (IsNonUniform)
1095       Handler.addRequirements(
1096           SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1097     else
1098       Handler.addRequirements(
1099           SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1100   } else if (isStorageTexelBuffer(PointeeType)) {
1101     if (IsNonUniform)
1102       Handler.addRequirements(
1103           SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1104     else
1105       Handler.addRequirements(
1106           SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1107   } else if (isSampledImage(PointeeType) ||
1108              isCombinedImageSampler(PointeeType) ||
1109              PointeeType->getOpcode() == SPIRV::OpTypeSampler) {
1110     if (IsNonUniform)
1111       Handler.addRequirements(
1112           SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1113     else
1114       Handler.addRequirements(
1115           SPIRV::Capability::SampledImageArrayDynamicIndexing);
1116   } else if (isStorageImage(PointeeType)) {
1117     if (IsNonUniform)
1118       Handler.addRequirements(
1119           SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1120     else
1121       Handler.addRequirements(
1122           SPIRV::Capability::StorageImageArrayDynamicIndexing);
1123   }
1124 }
1125 
1126 static bool isImageTypeWithUnknownFormat(SPIRVType *TypeInst) {
1127   if (TypeInst->getOpcode() != SPIRV::OpTypeImage)
1128     return false;
1129   assert(TypeInst->getOperand(7).isImm() && "The image format must be an imm.");
1130   return TypeInst->getOperand(7).getImm() == 0;
1131 }
1132 
1133 static void AddDotProductRequirements(const MachineInstr &MI,
1134                                       SPIRV::RequirementHandler &Reqs,
1135                                       const SPIRVSubtarget &ST) {
1136   if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1137     Reqs.addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1138   Reqs.addCapability(SPIRV::Capability::DotProduct);
1139 
1140   const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1141   assert(MI.getOperand(2).isReg() && "Unexpected operand in dot");
1142   // We do not consider what the previous instruction is. This is just used
1143   // to get the input register and to check the type.
1144   const MachineInstr *Input = MRI.getVRegDef(MI.getOperand(2).getReg());
1145   assert(Input->getOperand(1).isReg() && "Unexpected operand in dot input");
1146   Register InputReg = Input->getOperand(1).getReg();
1147 
1148   SPIRVType *TypeDef = MRI.getVRegDef(InputReg);
1149   if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1150     assert(TypeDef->getOperand(1).getImm() == 32);
1151     Reqs.addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1152   } else if (TypeDef->getOpcode() == SPIRV::OpTypeVector) {
1153     SPIRVType *ScalarTypeDef = MRI.getVRegDef(TypeDef->getOperand(1).getReg());
1154     assert(ScalarTypeDef->getOpcode() == SPIRV::OpTypeInt);
1155     if (ScalarTypeDef->getOperand(1).getImm() == 8) {
1156       assert(TypeDef->getOperand(2).getImm() == 4 &&
1157              "Dot operand of 8-bit integer type requires 4 components");
1158       Reqs.addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1159     } else {
1160       Reqs.addCapability(SPIRV::Capability::DotProductInputAll);
1161     }
1162   }
1163 }
1164 
1165 void addInstrRequirements(const MachineInstr &MI,
1166                           SPIRV::RequirementHandler &Reqs,
1167                           const SPIRVSubtarget &ST) {
1168   switch (MI.getOpcode()) {
1169   case SPIRV::OpMemoryModel: {
1170     int64_t Addr = MI.getOperand(0).getImm();
1171     Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
1172                                Addr, ST);
1173     int64_t Mem = MI.getOperand(1).getImm();
1174     Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
1175                                ST);
1176     break;
1177   }
1178   case SPIRV::OpEntryPoint: {
1179     int64_t Exe = MI.getOperand(0).getImm();
1180     Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
1181                                Exe, ST);
1182     break;
1183   }
1184   case SPIRV::OpExecutionMode:
1185   case SPIRV::OpExecutionModeId: {
1186     int64_t Exe = MI.getOperand(1).getImm();
1187     Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
1188                                Exe, ST);
1189     break;
1190   }
1191   case SPIRV::OpTypeMatrix:
1192     Reqs.addCapability(SPIRV::Capability::Matrix);
1193     break;
1194   case SPIRV::OpTypeInt: {
1195     unsigned BitWidth = MI.getOperand(1).getImm();
1196     if (BitWidth == 64)
1197       Reqs.addCapability(SPIRV::Capability::Int64);
1198     else if (BitWidth == 16)
1199       Reqs.addCapability(SPIRV::Capability::Int16);
1200     else if (BitWidth == 8)
1201       Reqs.addCapability(SPIRV::Capability::Int8);
1202     break;
1203   }
1204   case SPIRV::OpTypeFloat: {
1205     unsigned BitWidth = MI.getOperand(1).getImm();
1206     if (BitWidth == 64)
1207       Reqs.addCapability(SPIRV::Capability::Float64);
1208     else if (BitWidth == 16)
1209       Reqs.addCapability(SPIRV::Capability::Float16);
1210     break;
1211   }
1212   case SPIRV::OpTypeVector: {
1213     unsigned NumComponents = MI.getOperand(2).getImm();
1214     if (NumComponents == 8 || NumComponents == 16)
1215       Reqs.addCapability(SPIRV::Capability::Vector16);
1216     break;
1217   }
1218   case SPIRV::OpTypePointer: {
1219     auto SC = MI.getOperand(1).getImm();
1220     Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
1221     // If it is a pointer to a 16-bit float type and the target is OpenCL, add the
1222     // Float16Buffer capability.
1223     // capability.
1224     if (!ST.isOpenCLEnv())
1225       break;
1226     assert(MI.getOperand(2).isReg());
1227     const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1228     SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
1229     if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
1230         TypeDef->getOperand(1).getImm() == 16)
1231       Reqs.addCapability(SPIRV::Capability::Float16Buffer);
1232     break;
1233   }
1234   case SPIRV::OpExtInst: {
1235     if (MI.getOperand(2).getImm() ==
1236         static_cast<int64_t>(
1237             SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1238       Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1239     }
1240     break;
1241   }
1242   case SPIRV::OpBitReverse:
1243   case SPIRV::OpBitFieldInsert:
1244   case SPIRV::OpBitFieldSExtract:
1245   case SPIRV::OpBitFieldUExtract:
1246     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1247       Reqs.addCapability(SPIRV::Capability::Shader);
1248       break;
1249     }
1250     Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1251     Reqs.addCapability(SPIRV::Capability::BitInstructions);
1252     break;
1253   case SPIRV::OpTypeRuntimeArray:
1254     Reqs.addCapability(SPIRV::Capability::Shader);
1255     break;
1256   case SPIRV::OpTypeOpaque:
1257   case SPIRV::OpTypeEvent:
1258     Reqs.addCapability(SPIRV::Capability::Kernel);
1259     break;
1260   case SPIRV::OpTypePipe:
1261   case SPIRV::OpTypeReserveId:
1262     Reqs.addCapability(SPIRV::Capability::Pipes);
1263     break;
1264   case SPIRV::OpTypeDeviceEvent:
1265   case SPIRV::OpTypeQueue:
1266   case SPIRV::OpBuildNDRange:
1267     Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
1268     break;
1269   case SPIRV::OpDecorate:
1270   case SPIRV::OpDecorateId:
1271   case SPIRV::OpDecorateString:
1272     addOpDecorateReqs(MI, 1, Reqs, ST);
1273     break;
1274   case SPIRV::OpMemberDecorate:
1275   case SPIRV::OpMemberDecorateString:
1276     addOpDecorateReqs(MI, 2, Reqs, ST);
1277     break;
1278   case SPIRV::OpInBoundsPtrAccessChain:
1279     Reqs.addCapability(SPIRV::Capability::Addresses);
1280     break;
1281   case SPIRV::OpConstantSampler:
1282     Reqs.addCapability(SPIRV::Capability::LiteralSampler);
1283     break;
1284   case SPIRV::OpInBoundsAccessChain:
1285   case SPIRV::OpAccessChain:
1286     addOpAccessChainReqs(MI, Reqs, ST);
1287     break;
1288   case SPIRV::OpTypeImage:
1289     addOpTypeImageReqs(MI, Reqs, ST);
1290     break;
1291   case SPIRV::OpTypeSampler:
1292     if (!ST.isVulkanEnv()) {
1293       Reqs.addCapability(SPIRV::Capability::ImageBasic);
1294     }
1295     break;
1296   case SPIRV::OpTypeForwardPointer:
1297     // TODO: check if this is an OpenCL kernel.
1298     Reqs.addCapability(SPIRV::Capability::Addresses);
1299     break;
1300   case SPIRV::OpAtomicFlagTestAndSet:
1301   case SPIRV::OpAtomicLoad:
1302   case SPIRV::OpAtomicStore:
1303   case SPIRV::OpAtomicExchange:
1304   case SPIRV::OpAtomicCompareExchange:
1305   case SPIRV::OpAtomicIIncrement:
1306   case SPIRV::OpAtomicIDecrement:
1307   case SPIRV::OpAtomicIAdd:
1308   case SPIRV::OpAtomicISub:
1309   case SPIRV::OpAtomicUMin:
1310   case SPIRV::OpAtomicUMax:
1311   case SPIRV::OpAtomicSMin:
1312   case SPIRV::OpAtomicSMax:
1313   case SPIRV::OpAtomicAnd:
1314   case SPIRV::OpAtomicOr:
1315   case SPIRV::OpAtomicXor: {
1316     const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1317     const MachineInstr *InstrPtr = &MI;
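    // OpAtomicStore has no result type; take the type from the instruction
    // defining the stored value (operand 3) instead.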
1318     if (MI.getOpcode() == SPIRV::OpAtomicStore) {
1319       assert(MI.getOperand(3).isReg());
1320       InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
1321       assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
1322     }
1323     assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
1324     Register TypeReg = InstrPtr->getOperand(1).getReg();
1325     SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
1326     if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1327       unsigned BitWidth = TypeDef->getOperand(1).getImm();
1328       if (BitWidth == 64)
1329         Reqs.addCapability(SPIRV::Capability::Int64Atomics);
1330     }
1331     break;
1332   }
1333   case SPIRV::OpGroupNonUniformIAdd:
1334   case SPIRV::OpGroupNonUniformFAdd:
1335   case SPIRV::OpGroupNonUniformIMul:
1336   case SPIRV::OpGroupNonUniformFMul:
1337   case SPIRV::OpGroupNonUniformSMin:
1338   case SPIRV::OpGroupNonUniformUMin:
1339   case SPIRV::OpGroupNonUniformFMin:
1340   case SPIRV::OpGroupNonUniformSMax:
1341   case SPIRV::OpGroupNonUniformUMax:
1342   case SPIRV::OpGroupNonUniformFMax:
1343   case SPIRV::OpGroupNonUniformBitwiseAnd:
1344   case SPIRV::OpGroupNonUniformBitwiseOr:
1345   case SPIRV::OpGroupNonUniformBitwiseXor:
1346   case SPIRV::OpGroupNonUniformLogicalAnd:
1347   case SPIRV::OpGroupNonUniformLogicalOr:
1348   case SPIRV::OpGroupNonUniformLogicalXor: {
1349     assert(MI.getOperand(3).isImm());
1350     int64_t GroupOp = MI.getOperand(3).getImm();
1351     switch (GroupOp) {
1352     case SPIRV::GroupOperation::Reduce:
1353     case SPIRV::GroupOperation::InclusiveScan:
1354     case SPIRV::GroupOperation::ExclusiveScan:
1355       Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1356       break;
1357     case SPIRV::GroupOperation::ClusteredReduce:
1358       Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
1359       break;
1360     case SPIRV::GroupOperation::PartitionedReduceNV:
1361     case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1362     case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1363       Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1364       break;
1365     }
1366     break;
1367   }
1368   case SPIRV::OpGroupNonUniformShuffle:
1369   case SPIRV::OpGroupNonUniformShuffleXor:
1370     Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1371     break;
1372   case SPIRV::OpGroupNonUniformShuffleUp:
1373   case SPIRV::OpGroupNonUniformShuffleDown:
1374     Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1375     break;
1376   case SPIRV::OpGroupAll:
1377   case SPIRV::OpGroupAny:
1378   case SPIRV::OpGroupBroadcast:
1379   case SPIRV::OpGroupIAdd:
1380   case SPIRV::OpGroupFAdd:
1381   case SPIRV::OpGroupFMin:
1382   case SPIRV::OpGroupUMin:
1383   case SPIRV::OpGroupSMin:
1384   case SPIRV::OpGroupFMax:
1385   case SPIRV::OpGroupUMax:
1386   case SPIRV::OpGroupSMax:
1387     Reqs.addCapability(SPIRV::Capability::Groups);
1388     break;
1389   case SPIRV::OpGroupNonUniformElect:
1390     Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1391     break;
1392   case SPIRV::OpGroupNonUniformAll:
1393   case SPIRV::OpGroupNonUniformAny:
1394   case SPIRV::OpGroupNonUniformAllEqual:
1395     Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
1396     break;
1397   case SPIRV::OpGroupNonUniformBroadcast:
1398   case SPIRV::OpGroupNonUniformBroadcastFirst:
1399   case SPIRV::OpGroupNonUniformBallot:
1400   case SPIRV::OpGroupNonUniformInverseBallot:
1401   case SPIRV::OpGroupNonUniformBallotBitExtract:
1402   case SPIRV::OpGroupNonUniformBallotBitCount:
1403   case SPIRV::OpGroupNonUniformBallotFindLSB:
1404   case SPIRV::OpGroupNonUniformBallotFindMSB:
1405     Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
1406     break;
1407   case SPIRV::OpSubgroupShuffleINTEL:
1408   case SPIRV::OpSubgroupShuffleDownINTEL:
1409   case SPIRV::OpSubgroupShuffleUpINTEL:
1410   case SPIRV::OpSubgroupShuffleXorINTEL:
1411     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1412       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1413       Reqs.addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1414     }
1415     break;
1416   case SPIRV::OpSubgroupBlockReadINTEL:
1417   case SPIRV::OpSubgroupBlockWriteINTEL:
1418     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1419       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1420       Reqs.addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1421     }
1422     break;
1423   case SPIRV::OpSubgroupImageBlockReadINTEL:
1424   case SPIRV::OpSubgroupImageBlockWriteINTEL:
1425     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1426       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1427       Reqs.addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1428     }
1429     break;
1430   case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1431   case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1432     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1433       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1434       Reqs.addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1435     }
1436     break;
1437   case SPIRV::OpAssumeTrueKHR:
1438   case SPIRV::OpExpectKHR:
1439     if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1440       Reqs.addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1441       Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR);
1442     }
1443     break;
1444   case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1445   case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1446     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1447       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1448       Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1449     }
1450     break;
1451   case SPIRV::OpConstantFunctionPointerINTEL:
1452     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1453       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1454       Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1455     }
1456     break;
1457   case SPIRV::OpGroupNonUniformRotateKHR:
1458     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1459       report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the "
1460                          "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1461                          false);
1462     Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1463     Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1464     Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1465     break;
1466   case SPIRV::OpGroupIMulKHR:
1467   case SPIRV::OpGroupFMulKHR:
1468   case SPIRV::OpGroupBitwiseAndKHR:
1469   case SPIRV::OpGroupBitwiseOrKHR:
1470   case SPIRV::OpGroupBitwiseXorKHR:
1471   case SPIRV::OpGroupLogicalAndKHR:
1472   case SPIRV::OpGroupLogicalOrKHR:
1473   case SPIRV::OpGroupLogicalXorKHR:
1474     if (ST.canUseExtension(
1475             SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1476       Reqs.addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1477       Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1478     }
1479     break;
1480   case SPIRV::OpReadClockKHR:
1481     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1482       report_fatal_error("OpReadClockKHR instruction requires the "
1483                          "following SPIR-V extension: SPV_KHR_shader_clock",
1484                          false);
1485     Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1486     Reqs.addCapability(SPIRV::Capability::ShaderClockKHR);
1487     break;
1488   case SPIRV::OpFunctionPointerCallINTEL:
1489     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1490       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1491       Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1492     }
1493     break;
1494   case SPIRV::OpAtomicFAddEXT:
1495   case SPIRV::OpAtomicFMinEXT:
1496   case SPIRV::OpAtomicFMaxEXT:
1497     AddAtomicFloatRequirements(MI, Reqs, ST);
1498     break;
1499   case SPIRV::OpConvertBF16ToFINTEL:
1500   case SPIRV::OpConvertFToBF16INTEL:
1501     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1502       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1503       Reqs.addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1504     }
1505     break;
1506   case SPIRV::OpVariableLengthArrayINTEL:
1507   case SPIRV::OpSaveMemoryINTEL:
1508   case SPIRV::OpRestoreMemoryINTEL:
1509     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1510       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1511       Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1512     }
1513     break;
1514   case SPIRV::OpAsmTargetINTEL:
1515   case SPIRV::OpAsmINTEL:
1516   case SPIRV::OpAsmCallINTEL:
1517     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1518       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1519       Reqs.addCapability(SPIRV::Capability::AsmINTEL);
1520     }
1521     break;
1522   case SPIRV::OpTypeCooperativeMatrixKHR:
1523     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1524       report_fatal_error(
1525           "OpTypeCooperativeMatrixKHR type requires the "
1526           "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1527           false);
1528     Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1529     Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1530     break;
1531   case SPIRV::OpArithmeticFenceEXT:
1532     if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
1533       report_fatal_error("OpArithmeticFenceEXT requires the "
1534                          "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1535                          false);
1536     Reqs.addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1537     Reqs.addCapability(SPIRV::Capability::ArithmeticFenceEXT);
1538     break;
1539   case SPIRV::OpControlBarrierArriveINTEL:
1540   case SPIRV::OpControlBarrierWaitINTEL:
1541     if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1542       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1543       Reqs.addCapability(SPIRV::Capability::SplitBarrierINTEL);
1544     }
1545     break;
1546   case SPIRV::OpCooperativeMatrixMulAddKHR: {
1547     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1548       report_fatal_error("Cooperative matrix instructions require the "
1549                          "following SPIR-V extension: "
1550                          "SPV_KHR_cooperative_matrix",
1551                          false);
1552     Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1553     Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
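    // The optional Cooperative Matrix Operands literal is present only when
    // the instruction has its maximum number of operands.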
1554     constexpr unsigned MulAddMaxSize = 6;
1555     if (MI.getNumOperands() != MulAddMaxSize)
1556       break;
1557     const int64_t CoopOperands = MI.getOperand(MulAddMaxSize - 1).getImm();
1558     if (CoopOperands &
1559         SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
1560       if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1561         report_fatal_error("MatrixAAndBTF32ComponentsINTEL type interpretation "
1562                            "requires the following SPIR-V extension: "
1563                            "SPV_INTEL_joint_matrix",
1564                            false);
1565       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1566       Reqs.addCapability(
1567           SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
1568     }
1569     if (CoopOperands & SPIRV::CooperativeMatrixOperands::
1570                            MatrixAAndBBFloat16ComponentsINTEL ||
1571         CoopOperands &
1572             SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
1573         CoopOperands & SPIRV::CooperativeMatrixOperands::
1574                            MatrixResultBFloat16ComponentsINTEL) {
1575       if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1576         report_fatal_error("***BF16ComponentsINTEL type interpretations "
1577                            "require the following SPIR-V extension: "
1578                            "SPV_INTEL_joint_matrix",
1579                            false);
1580       Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1581       Reqs.addCapability(
1582           SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
1583     }
1584     break;
1585   }
1586   case SPIRV::OpCooperativeMatrixLoadKHR:
1587   case SPIRV::OpCooperativeMatrixStoreKHR:
1588   case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1589   case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1590   case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
1591     if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1592       report_fatal_error("Cooperative matrix instructions require the "
1593                          "following SPIR-V extension: "
1594                          "SPV_KHR_cooperative_matrix",
1595                          false);
1596     Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1597     Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1598 
1599     // Check the Layout operand; if it is not a standard one, add the
1600     // appropriate capability.
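    // Maps each opcode to the index of its Layout operand.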
1601     std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
1602         {SPIRV::OpCooperativeMatrixLoadKHR, 3},
1603         {SPIRV::OpCooperativeMatrixStoreKHR, 2},
1604         {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
1605         {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
1606         {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
1607 
1608     const auto OpCode = MI.getOpcode();
1609     const unsigned LayoutNum = LayoutToInstMap[OpCode];
1610     Register RegLayout = MI.getOperand(LayoutNum).getReg();
1611     const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1612     MachineInstr *MILayout = MRI.getUniqueVRegDef(RegLayout);
1613     if (MILayout->getOpcode() == SPIRV::OpConstantI) {
1614       const unsigned LayoutVal = MILayout->getOperand(2).getImm();
1615       if (LayoutVal ==
1616           static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
1617         if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1618           report_fatal_error("PackedINTEL layout requires the following SPIR-V "
1619                              "extension: SPV_INTEL_joint_matrix",
1620                              false);
1621         Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1622         Reqs.addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
1623       }
1624     }
1625 
1626     // Nothing more to do for the standard KHR load/store instructions.
1627     if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
1628         OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
1629       break;
1630 
1631     std::string InstName;
1632     switch (OpCode) {
1633     case SPIRV::OpCooperativeMatrixPrefetchINTEL:
1634       InstName = "OpCooperativeMatrixPrefetchINTEL";
1635       break;
1636     case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1637       InstName = "OpCooperativeMatrixLoadCheckedINTEL";
1638       break;
1639     case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1640       InstName = "OpCooperativeMatrixStoreCheckedINTEL";
1641       break;
1642     }
1643 
1644     if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
1645       const std::string ErrorMsg =
1646           InstName + " instruction requires the "
1647                      "following SPIR-V extension: SPV_INTEL_joint_matrix";
1648       report_fatal_error(ErrorMsg.c_str(), false);
1649     }
1650     Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1651     if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
1652       Reqs.addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
1653       break;
1654     }
1655     Reqs.addCapability(
1656         SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1657     break;
1658   }
1659   case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
1660     if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1661       report_fatal_error("OpCooperativeMatrixConstructCheckedINTEL "
1662                          "instruction requires the following SPIR-V extension: "
1663                          "SPV_INTEL_joint_matrix",
1664                          false);
1665     Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1666     Reqs.addCapability(
1667         SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1668     break;
1669   case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
1670     if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1671       report_fatal_error("OpCooperativeMatrixGetElementCoordINTEL requires the "
1672                          "following SPIR-V extension: SPV_INTEL_joint_matrix",
1673                          false);
1674     Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1675     Reqs.addCapability(
1676         SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
1677     break;
1678   case SPIRV::OpKill: {
1679     Reqs.addCapability(SPIRV::Capability::Shader);
1680   } break;
1681   case SPIRV::OpDemoteToHelperInvocation:
1682     Reqs.addCapability(SPIRV::Capability::DemoteToHelperInvocation);
1683 
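    // The capability is core since SPIR-V 1.6; earlier versions also need the
    // SPV_EXT_demote_to_helper_invocation extension.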
1684     if (ST.canUseExtension(
1685             SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
1686       if (!ST.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6)))
1687         Reqs.addExtension(
1688             SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
1689     }
1690     break;
1691   case SPIRV::OpSDot:
1692   case SPIRV::OpUDot:
1693     AddDotProductRequirements(MI, Reqs, ST);
1694     break;
1695   case SPIRV::OpImageRead: {
1696     Register ImageReg = MI.getOperand(2).getReg();
1697     SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1698         ImageReg, const_cast<MachineFunction *>(MI.getMF()));
1699     if (isImageTypeWithUnknownFormat(TypeDef))
1700       Reqs.addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
1701     break;
1702   }
1703   case SPIRV::OpImageWrite: {
1704     Register ImageReg = MI.getOperand(0).getReg();
1705     SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1706         ImageReg, const_cast<MachineFunction *>(MI.getMF()));
1707     if (isImageTypeWithUnknownFormat(TypeDef))
1708       Reqs.addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
1709     break;
1710   }
1711 
1712   default:
1713     break;
1714   }
1715 
1716   // If we require capability Shader, then we can remove the requirement for
1717   // the BitInstructions capability, since Shader is a superset capability
1718   // of BitInstructions.
1719   Reqs.removeCapabilityIf(SPIRV::Capability::BitInstructions,
1720                           SPIRV::Capability::Shader);
1721 }
1722 
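// Collect module-level requirements (capabilities and extensions) from all
// machine instructions, from the spirv.ExecutionMode named metadata, and from
// per-function metadata and attributes (work-group sizes, subgroup size,
// optnone).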
1723 static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
1724                         MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
1725   // Collect requirements for existing instructions.
1726   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1727     MachineFunction *MF = MMI->getMachineFunction(*F);
1728     if (!MF)
1729       continue;
1730     for (const MachineBasicBlock &MBB : *MF)
1731       for (const MachineInstr &MI : MBB)
1732         addInstrRequirements(MI, MAI.Reqs, ST);
1733   }
1734   // Collect requirements for OpExecutionMode instructions.
1735   auto Node = M.getNamedMetadata("spirv.ExecutionMode");
1736   if (Node) {
1737     bool RequireFloatControls = false, RequireFloatControls2 = false,
1738          VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4));
1739     bool HasFloatControls2 =
1740         ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1741     for (unsigned i = 0; i < Node->getNumOperands(); i++) {
1742       MDNode *MDN = cast<MDNode>(Node->getOperand(i));
1743       const MDOperand &MDOp = MDN->getOperand(1);
1744       if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
1745         Constant *C = CMeta->getValue();
1746         if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
1747           auto EM = Const->getZExtValue();
1748           // These execution modes became core in SPIR-V v1.4; for earlier
1749           // versions they also require the SPV_KHR_float_controls extension.
1750           switch (EM) {
1751           case SPIRV::ExecutionMode::DenormPreserve:
1752           case SPIRV::ExecutionMode::DenormFlushToZero:
1753           case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
1754           case SPIRV::ExecutionMode::RoundingModeRTE:
1755           case SPIRV::ExecutionMode::RoundingModeRTZ:
1756             RequireFloatControls = VerLower14;
1757             MAI.Reqs.getAndAddRequirements(
1758                 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1759             break;
1760           case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
1761           case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
1762           case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
1763           case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
1764             if (HasFloatControls2) {
1765               RequireFloatControls2 = true;
1766               MAI.Reqs.getAndAddRequirements(
1767                   SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1768             }
1769             break;
1770           default:
1771             MAI.Reqs.getAndAddRequirements(
1772                 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1773           }
1774         }
1775       }
1776     }
1777     if (RequireFloatControls &&
1778         ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
1779       MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls);
1780     if (RequireFloatControls2)
1781       MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1782   }
1783   for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
1784     const Function &F = *FI;
1785     if (F.isDeclaration())
1786       continue;
1787     if (F.getMetadata("reqd_work_group_size"))
1788       MAI.Reqs.getAndAddRequirements(
1789           SPIRV::OperandCategory::ExecutionModeOperand,
1790           SPIRV::ExecutionMode::LocalSize, ST);
1791     if (F.getFnAttribute("hlsl.numthreads").isValid()) {
1792       MAI.Reqs.getAndAddRequirements(
1793           SPIRV::OperandCategory::ExecutionModeOperand,
1794           SPIRV::ExecutionMode::LocalSize, ST);
1795     }
1796     if (F.getMetadata("work_group_size_hint"))
1797       MAI.Reqs.getAndAddRequirements(
1798           SPIRV::OperandCategory::ExecutionModeOperand,
1799           SPIRV::ExecutionMode::LocalSizeHint, ST);
1800     if (F.getMetadata("intel_reqd_sub_group_size"))
1801       MAI.Reqs.getAndAddRequirements(
1802           SPIRV::OperandCategory::ExecutionModeOperand,
1803           SPIRV::ExecutionMode::SubgroupSize, ST);
1804     if (F.getMetadata("vec_type_hint"))
1805       MAI.Reqs.getAndAddRequirements(
1806           SPIRV::OperandCategory::ExecutionModeOperand,
1807           SPIRV::ExecutionMode::VecTypeHint, ST);
1808 
1809     if (F.hasOptNone()) {
1810       if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
1811         MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
1812         MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
1813       } else if (ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
1814         MAI.Reqs.addExtension(SPIRV::Extension::SPV_EXT_optnone);
1815         MAI.Reqs.addCapability(SPIRV::Capability::OptNoneEXT);
1816       }
1817     }
1818   }
1819 }
1820 
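// Translate MachineInstr fast-math flags into the corresponding SPIR-V
// FPFastMathMode bit mask.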
1821 static unsigned getFastMathFlags(const MachineInstr &I) {
1822   unsigned Flags = SPIRV::FPFastMathMode::None;
1823   if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
1824     Flags |= SPIRV::FPFastMathMode::NotNaN;
1825   if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
1826     Flags |= SPIRV::FPFastMathMode::NotInf;
1827   if (I.getFlag(MachineInstr::MIFlag::FmNsz))
1828     Flags |= SPIRV::FPFastMathMode::NSZ;
1829   if (I.getFlag(MachineInstr::MIFlag::FmArcp))
1830     Flags |= SPIRV::FPFastMathMode::AllowRecip;
1831   if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
1832     Flags |= SPIRV::FPFastMathMode::Fast;
1833   return Flags;
1834 }
1835 
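// Add decorations to an instruction's result register based on its MI flags:
// NoSignedWrap and NoUnsignedWrap when their requirements are satisfiable, and
// FPFastMathMode when fast-math flags are present.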
1836 static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
1837                                    const SPIRVInstrInfo &TII,
1838                                    SPIRV::RequirementHandler &Reqs) {
1839   if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
1840       getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1841                                      SPIRV::Decoration::NoSignedWrap, ST, Reqs)
1842           .IsSatisfiable) {
1843     buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1844                     SPIRV::Decoration::NoSignedWrap, {});
1845   }
1846   if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
1847       getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1848                                      SPIRV::Decoration::NoUnsignedWrap, ST,
1849                                      Reqs)
1850           .IsSatisfiable) {
1851     buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1852                     SPIRV::Decoration::NoUnsignedWrap, {});
1853   }
1854   if (!TII.canUseFastMathFlags(I))
1855     return;
1856   unsigned FMFlags = getFastMathFlags(I);
1857   if (FMFlags == SPIRV::FPFastMathMode::None)
1858     return;
1859   Register DstReg = I.getOperand(0).getReg();
1860   buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
1861 }
1862 
1863 // Walk all functions and add decorations related to MI flags.
1864 static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
1865                            MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1866                            SPIRV::ModuleAnalysisInfo &MAI) {
1867   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1868     MachineFunction *MF = MMI->getMachineFunction(*F);
1869     if (!MF)
1870       continue;
1871     for (auto &MBB : *MF)
1872       for (auto &MI : MBB)
1873         handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
1874   }
1875 }
1876 
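// Emit OpName for every named, non-empty basic block, using a fresh virtual
// register aliased to the block's global register.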
1877 static void addMBBNames(const Module &M, const SPIRVInstrInfo &TII,
1878                         MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1879                         SPIRV::ModuleAnalysisInfo &MAI) {
1880   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1881     MachineFunction *MF = MMI->getMachineFunction(*F);
1882     if (!MF)
1883       continue;
1884     MachineRegisterInfo &MRI = MF->getRegInfo();
1885     for (auto &MBB : *MF) {
1886       if (!MBB.hasName() || MBB.empty())
1887         continue;
1888       // Emit basic block names.
1889       Register Reg = MRI.createGenericVirtualRegister(LLT::scalar(64));
1890       MRI.setRegClass(Reg, &SPIRV::IDRegClass);
1891       buildOpName(Reg, MBB.getName(), *std::prev(MBB.end()), TII);
1892       Register GlobalReg = MAI.getOrCreateMBBRegister(MBB);
1893       MAI.setRegisterAlias(MF, Reg, GlobalReg);
1894     }
1895   }
1896 }
1897 
1898 // Patch TargetOpcode::PHI instructions to SPIRV::OpPhi with a result type.
1899 static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR,
1900                       const SPIRVInstrInfo &TII, MachineModuleInfo *MMI) {
1901   for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1902     MachineFunction *MF = MMI->getMachineFunction(*F);
1903     if (!MF)
1904       continue;
1905     for (auto &MBB : *MF) {
1906       for (MachineInstr &MI : MBB) {
1907         if (MI.getOpcode() != TargetOpcode::PHI)
1908           continue;
1909         MI.setDesc(TII.get(SPIRV::OpPhi));
1910         Register ResTypeReg = GR->getSPIRVTypeID(
1911             GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg(), MF));
1912         MI.insert(MI.operands_begin() + 1,
1913                   {MachineOperand::CreateReg(ResTypeReg, false)});
1914       }
1915     }
1916   }
1917 }
1918 
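// Module-level analysis results, later queried by the SPIR-V AsmPrinter.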
1919 struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;
1920 
1921 void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
1922   AU.addRequired<TargetPassConfig>();
1923   AU.addRequired<MachineModuleInfoWrapperPass>();
1924 }
1925 
1926 bool SPIRVModuleAnalysis::runOnModule(Module &M) {
1927   SPIRVTargetMachine &TM =
1928       getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
1929   ST = TM.getSubtargetImpl();
1930   GR = ST->getSPIRVGlobalRegistry();
1931   TII = ST->getInstrInfo();
1932 
1933   MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
1934 
1935   setBaseInfo(M);
1936 
1937   patchPhis(M, GR, *TII, MMI);
1938 
1939   addMBBNames(M, *TII, MMI, *ST, MAI);
1940   addDecorations(M, *TII, MMI, *ST, MAI);
1941 
1942   collectReqs(M, MAI, MMI, *ST);
1943 
1944   // Process type/const/global var/func decl instructions, number their
1945   // destination registers from 0 to N, collect Extensions and Capabilities.
1946   collectDeclarations(M);
1947 
1948   // Number the rest of the registers from N+1 onwards.
1949   numberRegistersGlobally(M);
1950 
1951   // Collect OpName, OpEntryPoint, OpDecorate etc., process other instructions.
1952   processOtherInstrs(M);
1953 
1954   // If there are no entry points, we need the Linkage capability.
1955   if (MAI.MS[SPIRV::MB_EntryPoints].empty())
1956     MAI.Reqs.addCapability(SPIRV::Capability::Linkage);
1957 
1958   // Set maximum ID used.
1959   GR->setBound(MAI.MaxID);
1960 
1961   return false;
1962 }
1963