//===--- SPIRVUtils.cpp ---- SPIR-V Utility Functions -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains miscellaneous utility functions.
//
//===----------------------------------------------------------------------===//

#include "SPIRVUtils.h"
#include "MCTargetDesc/SPIRVBaseInfo.h"
#include "SPIRV.h"
#include "SPIRVInstrInfo.h"
#include "SPIRVSubtarget.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Demangle/Demangle.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <queue>
#include <vector>

namespace llvm {

// The following functions are used to add string literals as a series of
// 32-bit integer operands with the correct format, and to unpack them when
// making string comparisons in compiler passes.
// SPIR-V requires null-terminated UTF-8 strings padded to 32-bit alignment.
static uint32_t convertCharsToWord(const StringRef &Str, unsigned i) {
  uint32_t Word = 0u; // Build up this 32-bit word from 4 8-bit chars.
  for (unsigned WordIndex = 0; WordIndex < 4; ++WordIndex) {
    unsigned StrIndex = i + WordIndex;
    uint8_t CharToAdd = 0;       // Initialize the char as padding/null.
    if (StrIndex < Str.size()) { // If it's within the string, get a real char.
      CharToAdd = Str[StrIndex];
    }
    Word |= (CharToAdd << (WordIndex * 8));
  }
  return Word;
}
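
// Illustrative example (not part of the original file): packing "abc" from
// index 0 yields the word 0x00636261: 'a' (0x61) lands in the lowest byte
// and the unused fourth byte is left as zero padding.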

// Get length including padding and null terminator.
static size_t getPaddedLen(const StringRef &Str) {
  const size_t Len = Str.size() + 1;
  return (Len % 4 == 0) ? Len : Len + (4 - (Len % 4));
}
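
// Illustrative example (not part of the original file): for "hello", size 5
// plus 1 for the null terminator gives 6, which is padded up to 8 bytes,
// i.e. two 32-bit words.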

void addStringImm(const StringRef &Str, MCInst &Inst) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    Inst.addOperand(MCOperand::createImm(convertCharsToWord(Str, i)));
  }
}

void addStringImm(const StringRef &Str, MachineInstrBuilder &MIB) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add an operand for the 32-bits of chars or padding.
    MIB.addImm(convertCharsToWord(Str, i));
  }
}

void addStringImm(const StringRef &Str, IRBuilder<> &B,
                  std::vector<Value *> &Args) {
  const size_t PaddedLen = getPaddedLen(Str);
  for (unsigned i = 0; i < PaddedLen; i += 4) {
    // Add a vector element for the 32-bits of chars or padding.
    Args.push_back(B.getInt32(convertCharsToWord(Str, i)));
  }
}

std::string getStringImm(const MachineInstr &MI, unsigned StartIndex) {
  return getSPIRVStringOperand(MI, StartIndex);
}

void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB) {
  const auto Bitwidth = Imm.getBitWidth();
  if (Bitwidth == 1)
    return; // Already handled.
  if (Bitwidth <= 32) {
    MIB.addImm(Imm.getZExtValue());
    // The AsmPrinter needs this flag to print 16-bit floating-point constants
    // correctly.
    if (Bitwidth == 16)
      MIB.getInstr()->setAsmPrinterFlag(SPIRV::ASM_PRINTER_WIDTH16);
    return;
  }
  if (Bitwidth <= 64) {
    uint64_t FullImm = Imm.getZExtValue();
    uint32_t LowBits = FullImm & 0xffffffff;
    uint32_t HighBits = (FullImm >> 32) & 0xffffffff;
    MIB.addImm(LowBits).addImm(HighBits);
    return;
  }
  report_fatal_error("Unsupported constant bitwidth");
}
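
// Illustrative example (not part of the original file): a 64-bit immediate
// 0x0000000100000002 is emitted as two operands, low word 0x00000002 first
// and high word 0x00000001 second, matching SPIR-V's rule that a literal's
// low-order words appear first.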

void buildOpName(Register Target, const StringRef &Name,
                 MachineIRBuilder &MIRBuilder) {
  if (!Name.empty()) {
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpName).addUse(Target);
    addStringImm(Name, MIB);
  }
}

static void finishBuildOpDecorate(MachineInstrBuilder &MIB,
                                  const std::vector<uint32_t> &DecArgs,
                                  StringRef StrImm) {
  if (!StrImm.empty())
    addStringImm(StrImm, MIB);
  for (const auto &DecArg : DecArgs)
    MIB.addImm(DecArg);
}

void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpDecorate(Register Reg, MachineInstr &I, const SPIRVInstrInfo &TII,
                     SPIRV::Decoration::Decoration Dec,
                     const std::vector<uint32_t> &DecArgs, StringRef StrImm) {
  MachineBasicBlock &MBB = *I.getParent();
  auto MIB = BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpDecorate))
                 .addUse(Reg)
                 .addImm(static_cast<uint32_t>(Dec));
  finishBuildOpDecorate(MIB, DecArgs, StrImm);
}

void buildOpSpirvDecorations(Register Reg, MachineIRBuilder &MIRBuilder,
                             const MDNode *GVarMD) {
  for (unsigned I = 0, E = GVarMD->getNumOperands(); I != E; ++I) {
    auto *OpMD = dyn_cast<MDNode>(GVarMD->getOperand(I));
    if (!OpMD)
      report_fatal_error("Invalid decoration");
    if (OpMD->getNumOperands() == 0)
      report_fatal_error("Expect operand(s) of the decoration");
    ConstantInt *DecorationId =
        mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(0));
    if (!DecorationId)
      report_fatal_error("Expect SPIR-V <Decoration> operand to be the first "
                         "element of the decoration");
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpDecorate)
                   .addUse(Reg)
                   .addImm(static_cast<uint32_t>(DecorationId->getZExtValue()));
    for (unsigned OpI = 1, OpE = OpMD->getNumOperands(); OpI != OpE; ++OpI) {
      if (ConstantInt *OpV =
              mdconst::dyn_extract<ConstantInt>(OpMD->getOperand(OpI)))
        MIB.addImm(static_cast<uint32_t>(OpV->getZExtValue()));
      else if (MDString *OpV = dyn_cast<MDString>(OpMD->getOperand(OpI)))
        addStringImm(OpV->getString(), MIB);
      else
        report_fatal_error("Unexpected operand of the decoration");
    }
  }
}
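
// Illustrative metadata shape (an assumption, not spelled out in this file):
// a decoration node of the form !{i32 6, i32 4} requests Decoration 6
// (ArrayStride) with a stride of 4; integer operands become immediates and
// string operands are appended with addStringImm.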

// TODO: maybe the following two functions should be handled in the subtarget
// to allow for different OpenCL vs Vulkan handling.
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Function:
    return 0;
  case SPIRV::StorageClass::CrossWorkgroup:
    return 1;
  case SPIRV::StorageClass::UniformConstant:
    return 2;
  case SPIRV::StorageClass::Workgroup:
    return 3;
  case SPIRV::StorageClass::Generic:
    return 4;
  case SPIRV::StorageClass::DeviceOnlyINTEL:
    return 5;
  case SPIRV::StorageClass::HostOnlyINTEL:
    return 6;
  case SPIRV::StorageClass::Input:
    return 7;
  default:
    report_fatal_error("Unable to get address space id");
  }
}
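
// Editorial note: this numbering follows the OpenCL/SPIR address-space
// convention, e.g. an OpenCL __global pointer (addrspace(1)) maps to the
// CrossWorkgroup storage class and __local (addrspace(3)) to Workgroup.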

SPIRV::StorageClass::StorageClass
addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI) {
  switch (AddrSpace) {
  case 0:
    return SPIRV::StorageClass::Function;
  case 1:
    return SPIRV::StorageClass::CrossWorkgroup;
  case 2:
    return SPIRV::StorageClass::UniformConstant;
  case 3:
    return SPIRV::StorageClass::Workgroup;
  case 4:
    return SPIRV::StorageClass::Generic;
  case 5:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::DeviceOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 6:
    return STI.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)
               ? SPIRV::StorageClass::HostOnlyINTEL
               : SPIRV::StorageClass::CrossWorkgroup;
  case 7:
    return SPIRV::StorageClass::Input;
  default:
    report_fatal_error("Unknown address space");
  }
}

SPIRV::MemorySemantics::MemorySemantics
getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::StorageBuffer:
  case SPIRV::StorageClass::Uniform:
    return SPIRV::MemorySemantics::UniformMemory;
  case SPIRV::StorageClass::Workgroup:
    return SPIRV::MemorySemantics::WorkgroupMemory;
  case SPIRV::StorageClass::CrossWorkgroup:
    return SPIRV::MemorySemantics::CrossWorkgroupMemory;
  case SPIRV::StorageClass::AtomicCounter:
    return SPIRV::MemorySemantics::AtomicCounterMemory;
  case SPIRV::StorageClass::Image:
    return SPIRV::MemorySemantics::ImageMemory;
  default:
    return SPIRV::MemorySemantics::None;
  }
}

SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord) {
  switch (Ord) {
  case AtomicOrdering::Acquire:
    return SPIRV::MemorySemantics::Acquire;
  case AtomicOrdering::Release:
    return SPIRV::MemorySemantics::Release;
  case AtomicOrdering::AcquireRelease:
    return SPIRV::MemorySemantics::AcquireRelease;
  case AtomicOrdering::SequentiallyConsistent:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  case AtomicOrdering::Unordered:
  case AtomicOrdering::Monotonic:
  case AtomicOrdering::NotAtomic:
    return SPIRV::MemorySemantics::None;
  }
  llvm_unreachable("Unexpected atomic ordering");
}

MachineInstr *getDefInstrMaybeConstant(Register &ConstReg,
                                       const MachineRegisterInfo *MRI) {
  MachineInstr *MI = MRI->getVRegDef(ConstReg);
  MachineInstr *ConstInstr =
      MI->getOpcode() == SPIRV::G_TRUNC || MI->getOpcode() == SPIRV::G_ZEXT
          ? MRI->getVRegDef(MI->getOperand(1).getReg())
          : MI;
  if (auto *GI = dyn_cast<GIntrinsic>(ConstInstr)) {
    if (GI->is(Intrinsic::spv_track_constant)) {
      ConstReg = ConstInstr->getOperand(2).getReg();
      return MRI->getVRegDef(ConstReg);
    }
  } else if (ConstInstr->getOpcode() == SPIRV::ASSIGN_TYPE) {
    ConstReg = ConstInstr->getOperand(1).getReg();
    return MRI->getVRegDef(ConstReg);
  }
  return MRI->getVRegDef(ConstReg);
}
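
// Illustrative chain (hypothetical registers, not from this file): given
//   %c = G_CONSTANT i32 42
//   %t = ASSIGN_TYPE %c, ...
// calling getDefInstrMaybeConstant on %t's register rewrites ConstReg to %c
// and returns the defining G_CONSTANT.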

uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI) {
  const MachineInstr *MI = getDefInstrMaybeConstant(ConstReg, MRI);
  assert(MI && MI->getOpcode() == TargetOpcode::G_CONSTANT);
  return MI->getOperand(1).getCImm()->getValue().getZExtValue();
}

bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID) {
  if (const auto *GI = dyn_cast<GIntrinsic>(&MI))
    return GI->is(IntrinsicID);
  return false;
}

Type *getMDOperandAsType(const MDNode *N, unsigned I) {
  Type *ElementTy = cast<ValueAsMetadata>(N->getOperand(I))->getType();
  return toTypedPointer(ElementTy);
}

// The set of names is borrowed from the SPIR-V translator.
// TODO: this could instead be implemented in SPIRVBuiltins.td.
static bool isPipeOrAddressSpaceCastBI(const StringRef MangledName) {
  return MangledName == "write_pipe_2" || MangledName == "read_pipe_2" ||
         MangledName == "write_pipe_2_bl" || MangledName == "read_pipe_2_bl" ||
         MangledName == "write_pipe_4" || MangledName == "read_pipe_4" ||
         MangledName == "reserve_write_pipe" ||
         MangledName == "reserve_read_pipe" ||
         MangledName == "commit_write_pipe" ||
         MangledName == "commit_read_pipe" ||
         MangledName == "work_group_reserve_write_pipe" ||
         MangledName == "work_group_reserve_read_pipe" ||
         MangledName == "work_group_commit_write_pipe" ||
         MangledName == "work_group_commit_read_pipe" ||
         MangledName == "get_pipe_num_packets_ro" ||
         MangledName == "get_pipe_max_packets_ro" ||
         MangledName == "get_pipe_num_packets_wo" ||
         MangledName == "get_pipe_max_packets_wo" ||
         MangledName == "sub_group_reserve_write_pipe" ||
         MangledName == "sub_group_reserve_read_pipe" ||
         MangledName == "sub_group_commit_write_pipe" ||
         MangledName == "sub_group_commit_read_pipe" ||
         MangledName == "to_global" || MangledName == "to_local" ||
         MangledName == "to_private";
}

static bool isEnqueueKernelBI(const StringRef MangledName) {
  return MangledName == "__enqueue_kernel_basic" ||
         MangledName == "__enqueue_kernel_basic_events" ||
         MangledName == "__enqueue_kernel_varargs" ||
         MangledName == "__enqueue_kernel_events_varargs";
}

static bool isKernelQueryBI(const StringRef MangledName) {
  return MangledName == "__get_kernel_work_group_size_impl" ||
         MangledName == "__get_kernel_sub_group_count_for_ndrange_impl" ||
         MangledName == "__get_kernel_max_sub_group_size_for_ndrange_impl" ||
         MangledName == "__get_kernel_preferred_work_group_size_multiple_impl";
}

static bool isNonMangledOCLBuiltin(StringRef Name) {
  if (!Name.starts_with("__"))
    return false;

  return isEnqueueKernelBI(Name) || isKernelQueryBI(Name) ||
         isPipeOrAddressSpaceCastBI(Name.drop_front(2)) ||
         Name == "__translate_sampler_initializer";
}

std::string getOclOrSpirvBuiltinDemangledName(StringRef Name) {
  bool IsNonMangledOCL = isNonMangledOCLBuiltin(Name);
  bool IsNonMangledSPIRV = Name.starts_with("__spirv_");
  bool IsNonMangledHLSL = Name.starts_with("__hlsl_");
  bool IsMangled = Name.starts_with("_Z");

  // If the name is not mangled, return it as-is.
  if (IsNonMangledOCL || IsNonMangledSPIRV || IsNonMangledHLSL || !IsMangled)
    return Name.str();

  // Try the itanium demangler first.
  if (char *DemangledName = itaniumDemangle(Name.data())) {
    std::string Result = DemangledName;
    free(DemangledName);
    return Result;
  }

  // Assume C++ here; we may need an explicit check of the source language.
  // OpenCL C++ built-ins are declared in the cl namespace.
  // TODO: consider using the 'St' abbreviation for the cl namespace mangling,
  // similar to ::std:: in C++.
  size_t Start, Len = 0;
  size_t DemangledNameLenStart = 2;
  if (Name.starts_with("_ZN")) {
    // Skip CV and ref qualifiers.
    size_t NameSpaceStart = Name.find_first_not_of("rVKRO", 3);
    // All built-ins are in the ::cl::__spirv namespace.
    if (Name.substr(NameSpaceStart, 11) != "2cl7__spirv")
      return std::string();
    DemangledNameLenStart = NameSpaceStart + 11;
  }
  Start = Name.find_first_not_of("0123456789", DemangledNameLenStart);
  Name.substr(DemangledNameLenStart, Start - DemangledNameLenStart)
      .getAsInteger(10, Len);
  return Name.substr(Start, Len).str();
}
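
// Illustrative example (not part of the original file): if the itanium
// demangler rejects a name such as "_ZN2cl7__spirv3fooEv", the fallback path
// above skips past "2cl7__spirv", reads the length prefix "3", and returns
// "foo".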

bool hasBuiltinTypePrefix(StringRef Name) {
  return Name.starts_with("opencl.") || Name.starts_with("ocl_") ||
         Name.starts_with("spirv.");
}

bool isSpecialOpaqueType(const Type *Ty) {
  if (const TargetExtType *EType = dyn_cast<TargetExtType>(Ty))
    return hasBuiltinTypePrefix(EType->getName());

  return false;
}

bool isEntryPoint(const Function &F) {
  // OpenCL handling: any function with the SPIR_KERNEL calling convention is
  // a potential entry point.
  if (F.getCallingConv() == CallingConv::SPIR_KERNEL)
    return true;

  // HLSL handling: special attributes are emitted by the front end.
  if (F.getFnAttribute("hlsl.shader").isValid())
    return true;

  return false;
}

Type *parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx) {
  TypeName.consume_front("atomic_");
  if (TypeName.consume_front("void"))
    return Type::getVoidTy(Ctx);
  else if (TypeName.consume_front("bool"))
    return Type::getIntNTy(Ctx, 1);
  else if (TypeName.consume_front("char") ||
           TypeName.consume_front("unsigned char") ||
           TypeName.consume_front("uchar"))
    return Type::getInt8Ty(Ctx);
  else if (TypeName.consume_front("short") ||
           TypeName.consume_front("unsigned short") ||
           TypeName.consume_front("ushort"))
    return Type::getInt16Ty(Ctx);
  else if (TypeName.consume_front("int") ||
           TypeName.consume_front("unsigned int") ||
           TypeName.consume_front("uint"))
    return Type::getInt32Ty(Ctx);
  else if (TypeName.consume_front("long") ||
           TypeName.consume_front("unsigned long") ||
           TypeName.consume_front("ulong"))
    return Type::getInt64Ty(Ctx);
  else if (TypeName.consume_front("half"))
    return Type::getHalfTy(Ctx);
  else if (TypeName.consume_front("float"))
    return Type::getFloatTy(Ctx);
  else if (TypeName.consume_front("double"))
    return Type::getDoubleTy(Ctx);

  // Unable to recognize the SPIR-V type name.
  return nullptr;
}
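
// Illustrative example (not part of the original file): parsing "atomic_uint"
// drops the "atomic_" prefix, matches "uint", and returns the 32-bit integer
// type; TypeName is consumed in place, so any remaining suffix is left for
// the caller.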
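
// Collect every block reachable from Start by following only forward edges:
// an edge whose target dominates its source is a back-edge and is skipped,
// so the BFS below terminates even on cyclic CFGs.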
std::unordered_set<BasicBlock *>
PartialOrderingVisitor::getReachableFrom(BasicBlock *Start) {
  std::queue<BasicBlock *> ToVisit;
  ToVisit.push(Start);

  std::unordered_set<BasicBlock *> Output;
  while (ToVisit.size() != 0) {
    BasicBlock *BB = ToVisit.front();
    ToVisit.pop();

    if (Output.count(BB) != 0)
      continue;
    Output.insert(BB);

    for (BasicBlock *Successor : successors(BB)) {
      if (DT.dominates(Successor, BB))
        continue;
      ToVisit.push(Successor);
    }
  }

  return Output;
}

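// Assigns a rank to BB and recursively visits its successors. A block's
// successors are only explored once all of its non-back-edge predecessors
// have been ranked (otherwise we bail out and the block is revisited later);
// successors inside a loop are ranked before the blocks that follow the
// loop, which keeps loop bodies contiguous in the final order.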
size_t PartialOrderingVisitor::visit(BasicBlock *BB, size_t Rank) {
  if (Visited.count(BB) != 0)
    return Rank;

  Loop *L = LI.getLoopFor(BB);
  const bool isLoopHeader = LI.isLoopHeader(BB);

  if (BlockToOrder.count(BB) == 0) {
    OrderInfo Info = {Rank, Visited.size()};
    BlockToOrder.emplace(BB, Info);
  } else {
    BlockToOrder[BB].Rank = std::max(BlockToOrder[BB].Rank, Rank);
  }

  for (BasicBlock *Predecessor : predecessors(BB)) {
    if (isLoopHeader && L->contains(Predecessor)) {
      continue;
    }

    if (BlockToOrder.count(Predecessor) == 0) {
      return Rank;
    }
  }

  Visited.insert(BB);

  SmallVector<BasicBlock *, 2> OtherSuccessors;
  SmallVector<BasicBlock *, 2> LoopSuccessors;

  for (BasicBlock *Successor : successors(BB)) {
    // Ignoring back-edges.
    if (DT.dominates(Successor, BB))
      continue;

    if (isLoopHeader && L->contains(Successor)) {
      LoopSuccessors.push_back(Successor);
    } else
      OtherSuccessors.push_back(Successor);
  }

  for (BasicBlock *BB : LoopSuccessors)
    Rank = std::max(Rank, visit(BB, Rank + 1));

  size_t OutputRank = Rank;
  for (BasicBlock *Item : OtherSuccessors)
    OutputRank = std::max(OutputRank, visit(Item, Rank + 1));
  return OutputRank;
}

PartialOrderingVisitor::PartialOrderingVisitor(Function &F) {
  DT.recalculate(F);
  LI = LoopInfo(DT);

  visit(&*F.begin(), 0);

  Order.reserve(F.size());
  for (auto &[BB, Info] : BlockToOrder)
    Order.emplace_back(BB);

  std::sort(Order.begin(), Order.end(), [&](const auto &LHS, const auto &RHS) {
    return compare(LHS, RHS);
  });
}

bool PartialOrderingVisitor::compare(const BasicBlock *LHS,
                                     const BasicBlock *RHS) const {
  const OrderInfo &InfoLHS = BlockToOrder.at(const_cast<BasicBlock *>(LHS));
  const OrderInfo &InfoRHS = BlockToOrder.at(const_cast<BasicBlock *>(RHS));
  if (InfoLHS.Rank != InfoRHS.Rank)
    return InfoLHS.Rank < InfoRHS.Rank;
  return InfoLHS.TraversalIndex < InfoRHS.TraversalIndex;
}

void PartialOrderingVisitor::partialOrderVisit(
    BasicBlock &Start, std::function<bool(BasicBlock *)> Op) {
  std::unordered_set<BasicBlock *> Reachable = getReachableFrom(&Start);
  assert(BlockToOrder.count(&Start) != 0);

  // Skip blocks that come before |Start| in the computed order.
  auto It = Order.begin();
  while (It != Order.end() && *It != &Start)
    ++It;

  // |Start| is always in the order: in the worst case it is the last block,
  // so It points to the last element, never past-the-end.
  assert(It != Order.end());

  // By default there is no rank limit, so EndRank starts out unset.
  std::optional<size_t> EndRank = std::nullopt;
  for (; It != Order.end(); ++It) {
    if (EndRank.has_value() && BlockToOrder[*It].Rank > *EndRank)
      break;

    if (Reachable.count(*It) == 0) {
      continue;
    }

    if (!Op(*It)) {
      EndRank = BlockToOrder[*It].Rank;
    }
  }
}

bool sortBlocks(Function &F) {
  if (F.size() == 0)
    return false;

  bool Modified = false;

  std::vector<BasicBlock *> Order;
  Order.reserve(F.size());

  PartialOrderingVisitor Visitor(F);
  Visitor.partialOrderVisit(*F.begin(), [&Order](BasicBlock *Block) {
    Order.push_back(Block);
    return true;
  });

  assert(&*F.begin() == Order[0]);
  BasicBlock *LastBlock = &*F.begin();
  for (BasicBlock *BB : Order) {
    if (BB != LastBlock && &*LastBlock->getNextNode() != BB) {
      Modified = true;
      BB->moveAfter(LastBlock);
    }
    LastBlock = BB;
  }

  return Modified;
}

} // namespace llvm