//===------ Support/ScopHelper.h -- Some Helper Functions for Scop. -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Small functions that help with LLVM-IR.
//
//===----------------------------------------------------------------------===//

#ifndef POLLY_SUPPORT_IRHELPER_H
#define POLLY_SUPPORT_IRHELPER_H

#include "llvm/ADT/SetVector.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "isl/isl-noexceptions.h"
#include <optional>

namespace llvm {
class LoopInfo;
class Loop;
class ScalarEvolution;
class SCEV;
class Region;
class Pass;
class DominatorTree;
class RegionInfo;
class RegionNode;
} // namespace llvm

namespace polly {
class Scop;
class ScopStmt;

/// Same as llvm/Analysis/ScalarEvolutionExpressions.h
using LoopToScevMapT = llvm::DenseMap<const llvm::Loop *, const llvm::SCEV *>;

/// Enumeration of assumptions Polly can take.
enum AssumptionKind {
  ALIASING,
  INBOUNDS,
  WRAPPING,
  UNSIGNED,
  PROFITABLE,
  ERRORBLOCK,
  COMPLEXITY,
  INFINITELOOP,
  INVARIANTLOAD,
  DELINEARIZATION,
};

/// Enum to distinguish between assumptions and restrictions.
enum AssumptionSign { AS_ASSUMPTION, AS_RESTRICTION };

/// Helper struct to remember assumptions.
struct Assumption {
  /// The kind of the assumption (e.g., WRAPPING).
  AssumptionKind Kind;

  /// Flag to distinguish assumptions and restrictions.
  AssumptionSign Sign;

  /// The valid/invalid context if this is an assumption/restriction.
  isl::set Set;

  /// The location that caused this assumption.
  llvm::DebugLoc Loc;

  /// An optional block whose domain can simplify the assumption.
  llvm::BasicBlock *BB;

  /// Whether the assumption must be checked at runtime.
  bool RequiresRTC;
};

using RecordedAssumptionsTy = llvm::SmallVector<Assumption, 8>;

/// Record an assumption for later addition to the assumed context.
///
/// This function will add the assumption to the RecordedAssumptions. This
/// collection will be added (@see addAssumption) to the assumed context once
/// all parameters are known and the context is fully built.
///
/// @param RecordedAssumptions Container which keeps all recorded assumptions.
/// @param Kind The assumption kind describing the underlying cause.
/// @param Set  The relations between parameters that are assumed to hold.
/// @param Loc  The location in the source that caused this assumption.
/// @param Sign Enum to indicate if the assumptions in @p Set are positive
///             (needed/assumptions) or negative (invalid/restrictions).
/// @param BB   The block in which this assumption was taken. If it is
///             set, the domain of that block will be used to simplify the
///             actual assumption in @p Set once it is added. This is useful
///             if the assumption was created prior to the domain.
/// @param RTC  Does the assumption require a runtime check?
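///
/// A minimal usage sketch (illustrative only; `Assumptions`, `AssumedSet` and
/// `DLoc` are hypothetical names, not part of this interface):
///
///     RecordedAssumptionsTy Assumptions;
///     // Remember that AssumedSet must hold; the record is added to the
///     // assumed context later, once all parameters are known.
///     recordAssumption(&Assumptions, ALIASING, AssumedSet, DLoc,
///                      AS_ASSUMPTION);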
void recordAssumption(RecordedAssumptionsTy *RecordedAssumptions,
                      AssumptionKind Kind, isl::set Set, llvm::DebugLoc Loc,
                      AssumptionSign Sign, llvm::BasicBlock *BB = nullptr,
                      bool RTC = true);

/// Type to remap values.
using ValueMapT = llvm::DenseMap<llvm::AssertingVH<llvm::Value>,
                                 llvm::AssertingVH<llvm::Value>>;

/// Type for a set of invariant loads.
using InvariantLoadsSetTy = llvm::SetVector<llvm::AssertingVH<llvm::LoadInst>>;

/// Set type for parameters.
using ParameterSetTy = llvm::SetVector<const llvm::SCEV *>;

/// Set of loops (used to remember loops in non-affine subregions).
using BoxedLoopsSetTy = llvm::SetVector<const llvm::Loop *>;

/// Utility proxy to wrap the common members of LoadInst and StoreInst.
///
/// This works like the LLVM utility class CallSite, i.e. it forwards all calls
/// to either a LoadInst, StoreInst, MemIntrinsic or MemTransferInst.
/// It is similar to LLVM's utility classes IntrinsicInst, MemIntrinsic,
/// MemTransferInst, etc. in that it offers a common interface, but does not act
/// as a fake base class.
/// It is similar to StringRef and ArrayRef in that it holds a pointer to the
/// referenced object and should be passed by-value as it is small enough.
///
/// This proxy can either represent a LoadInst instance, a StoreInst instance,
/// a MemIntrinsic instance (memset, memmove, memcpy), a CallInst instance or a
/// nullptr (only creatable using the default constructor); never an Instruction
/// that is neither of the above mentioned. When representing a nullptr, only
/// the following methods are defined:
/// isNull(), isInstruction(), isLoad(), isStore(), ..., isMemTransferInst(),
/// operator bool(), operator!()
///
/// The functions isa, cast, cast_or_null, dyn_cast are modeled to resemble
/// those from llvm/Support/Casting.h. Partial template function specialization
/// is currently not supported in C++, so the LLVM ones cannot be used directly.
/// (llvm::isa could, but then llvm::cast etc. would not have the expected
/// behavior.)
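///
/// A usage sketch (illustrative only; `Inst` is a hypothetical
/// llvm::Instruction pointer from the caller's context):
///
///     if (MemAccInst MA = MemAccInst::dyn_cast(Inst)) {
///       // Uniform interface, regardless of whether Inst is a load, store,
///       // memory intrinsic or call.
///       llvm::Value *Ptr = MA.getPointerOperand();
///       bool IsSimple = MA.isSimple();
///     }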
class MemAccInst final {
private:
  llvm::Instruction *I;

public:
  MemAccInst() : I(nullptr) {}
  MemAccInst(const MemAccInst &Inst) : I(Inst.I) {}
  /* implicit */ MemAccInst(llvm::LoadInst &LI) : I(&LI) {}
  /* implicit */ MemAccInst(llvm::LoadInst *LI) : I(LI) {}
  /* implicit */ MemAccInst(llvm::StoreInst &SI) : I(&SI) {}
  /* implicit */ MemAccInst(llvm::StoreInst *SI) : I(SI) {}
  /* implicit */ MemAccInst(llvm::MemIntrinsic *MI) : I(MI) {}
  /* implicit */ MemAccInst(llvm::CallInst *CI) : I(CI) {}
  explicit MemAccInst(llvm::Instruction &I) : I(&I) { assert(isa(I)); }
  explicit MemAccInst(llvm::Instruction *I) : I(I) { assert(isa(I)); }

  static bool isa(const llvm::Value &V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static bool isa(const llvm::Value *V) {
    return llvm::isa<llvm::LoadInst>(V) || llvm::isa<llvm::StoreInst>(V) ||
           llvm::isa<llvm::CallInst>(V) || llvm::isa<llvm::MemIntrinsic>(V);
  }
  static MemAccInst cast(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast(llvm::Value *V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value &V) {
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst cast_or_null(llvm::Value *V) {
    if (!V)
      return MemAccInst();
    return MemAccInst(llvm::cast<llvm::Instruction>(V));
  }
  static MemAccInst dyn_cast(llvm::Value &V) {
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }
  static MemAccInst dyn_cast(llvm::Value *V) {
    assert(V);
    if (isa(V))
      return MemAccInst(llvm::cast<llvm::Instruction>(V));
    return MemAccInst();
  }

  MemAccInst &operator=(const MemAccInst &Inst) {
    I = Inst.I;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst &LI) {
    I = &LI;
    return *this;
  }
  MemAccInst &operator=(llvm::LoadInst *LI) {
    I = LI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst &SI) {
    I = &SI;
    return *this;
  }
  MemAccInst &operator=(llvm::StoreInst *SI) {
    I = SI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic &MI) {
    I = &MI;
    return *this;
  }
  MemAccInst &operator=(llvm::MemIntrinsic *MI) {
    I = MI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst &CI) {
    I = &CI;
    return *this;
  }
  MemAccInst &operator=(llvm::CallInst *CI) {
    I = CI;
    return *this;
  }

  llvm::Instruction *get() const {
    assert(I && "Unexpected nullptr!");
    return I;
  }
  operator llvm::Instruction *() const { return asInstruction(); }
  llvm::Instruction *operator->() const { return get(); }

  explicit operator bool() const { return isInstruction(); }
  bool operator!() const { return isNull(); }

  llvm::Value *getValueOperand() const {
    if (isLoad())
      return asLoad();
    if (isStore())
      return asStore()->getValueOperand();
    if (isMemIntrinsic())
      return nullptr;
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::Value *getPointerOperand() const {
    if (isLoad())
      return asLoad()->getPointerOperand();
    if (isStore())
      return asStore()->getPointerOperand();
    if (isMemIntrinsic())
      return asMemIntrinsic()->getRawDest();
    if (isCallInst())
      return nullptr;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isVolatile() const {
    if (isLoad())
      return asLoad()->isVolatile();
    if (isStore())
      return asStore()->isVolatile();
    if (isMemIntrinsic())
      return asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return false;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isSimple() const {
    if (isLoad())
      return asLoad()->isSimple();
    if (isStore())
      return asStore()->isSimple();
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }
  llvm::AtomicOrdering getOrdering() const {
    if (isLoad())
      return asLoad()->getOrdering();
    if (isStore())
      return asStore()->getOrdering();
    if (isMemIntrinsic())
      return llvm::AtomicOrdering::NotAtomic;
    if (isCallInst())
      return llvm::AtomicOrdering::NotAtomic;
    llvm_unreachable("Operation not supported on nullptr");
  }
  bool isUnordered() const {
    if (isLoad())
      return asLoad()->isUnordered();
    if (isStore())
      return asStore()->isUnordered();
    // Copied from the Load/Store implementation of isUnordered:
    if (isMemIntrinsic())
      return !asMemIntrinsic()->isVolatile();
    if (isCallInst())
      return true;
    llvm_unreachable("Operation not supported on nullptr");
  }

  bool isNull() const { return !I; }
  bool isInstruction() const { return I; }

  llvm::Instruction *asInstruction() const { return I; }

  bool isLoad() const { return I && llvm::isa<llvm::LoadInst>(I); }
  bool isStore() const { return I && llvm::isa<llvm::StoreInst>(I); }
  bool isCallInst() const { return I && llvm::isa<llvm::CallInst>(I); }
  bool isMemIntrinsic() const { return I && llvm::isa<llvm::MemIntrinsic>(I); }
  bool isMemSetInst() const { return I && llvm::isa<llvm::MemSetInst>(I); }
  bool isMemTransferInst() const {
    return I && llvm::isa<llvm::MemTransferInst>(I);
  }

  llvm::LoadInst *asLoad() const { return llvm::cast<llvm::LoadInst>(I); }
  llvm::StoreInst *asStore() const { return llvm::cast<llvm::StoreInst>(I); }
  llvm::CallInst *asCallInst() const { return llvm::cast<llvm::CallInst>(I); }
  llvm::MemIntrinsic *asMemIntrinsic() const {
    return llvm::cast<llvm::MemIntrinsic>(I);
  }
  llvm::MemSetInst *asMemSetInst() const {
    return llvm::cast<llvm::MemSetInst>(I);
  }
  llvm::MemTransferInst *asMemTransferInst() const {
    return llvm::cast<llvm::MemTransferInst>(I);
  }
};
} // namespace polly

namespace llvm {
/// Specialize simplify_type for MemAccInst to enable dyn_cast and cast from
/// a MemAccInst object.
template <> struct simplify_type<polly::MemAccInst> {
  typedef Instruction *SimpleType;
  static SimpleType getSimplifiedValue(polly::MemAccInst &I) {
    return I.asInstruction();
  }
};
} // namespace llvm
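
// Illustrative sketch (not part of the interface): with the simplify_type
// specialization above, LLVM's casting utilities accept a MemAccInst
// directly, e.g.:
//
//     polly::MemAccInst MA = /* ... */;
//     if (auto *LI = llvm::dyn_cast<llvm::LoadInst>(MA)) {
//       // MA wraps a load; use LI as a plain llvm::LoadInst.
//     }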

namespace polly {

/// Simplify the region to have a single unconditional entry edge and a
/// single exit edge.
///
/// Although this function allows DT and RI to be null, regions only work
/// properly if the DominatorTree (for Region::contains) and RegionInfo are kept
/// up-to-date.
///
/// @param R  The region to be simplified
/// @param DT DominatorTree to be updated.
/// @param LI LoopInfo to be updated.
/// @param RI RegionInfo to be updated.
void simplifyRegion(llvm::Region *R, llvm::DominatorTree *DT,
                    llvm::LoopInfo *LI, llvm::RegionInfo *RI);

/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param P          The pass that is currently running.
///
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock, llvm::Pass *P);

/// Split the entry block of a function to store the newly inserted
/// allocations outside of all Scops.
///
/// @param EntryBlock The entry block of the current function.
/// @param DT         DominatorTree to be updated.
/// @param LI         LoopInfo to be updated.
/// @param RI         RegionInfo to be updated.
void splitEntryBlockForAlloca(llvm::BasicBlock *EntryBlock,
                              llvm::DominatorTree *DT, llvm::LoopInfo *LI,
                              llvm::RegionInfo *RI);

/// Wrapper for SCEVExpander extended to all Polly features.
///
/// This wrapper will internally call the SCEVExpander but also makes sure that
/// all additional features not represented in SCEV (e.g., SDiv/SRem are not
/// black boxes but can be part of the function) will be expanded correctly.
///
/// The parameters are the same as for the creation of a SCEVExpander as well
/// as the call to SCEVExpander::expandCodeFor:
///
/// @param S     The current Scop.
/// @param SE    The Scalar Evolution pass used by @p S.
/// @param GenFn The function to generate code in. Can be the same as the
///              function containing @p S.
/// @param GenSE The Scalar Evolution pass for @p GenFn.
/// @param DL    The module data layout.
/// @param Name  The suffix added to the new instruction names.
/// @param E     The expression for which code is actually generated.
/// @param Ty    The type of the resulting code.
/// @param IP    The insertion point for the new code.
/// @param VMap  A remapping of values used in @p E.
/// @param LoopMap A remapping of loops used in @p E.
/// @param RTCBB The last block of the RTC. Used to insert loop-invariant
///              instructions in rare cases.
llvm::Value *expandCodeFor(Scop &S, llvm::ScalarEvolution &SE,
                           llvm::Function *GenFn, llvm::ScalarEvolution &GenSE,
                           const llvm::DataLayout &DL, const char *Name,
                           const llvm::SCEV *E, llvm::Type *Ty,
                           llvm::Instruction *IP, ValueMapT *VMap,
                           LoopToScevMapT *LoopMap, llvm::BasicBlock *RTCBB);
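
// A hedged sketch of a call site (all names are hypothetical and come from
// the caller's code-generation context; this is not a prescribed usage):
//
//     llvm::Value *Expanded =
//         expandCodeFor(S, SE, GenFn, GenSE, DL, "polly", Expr,
//                       Expr->getType(), InsertPt, &VMap, &LoopMap, RTCBB);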

/// Return the condition for the terminator @p TI.
///
/// For unconditional branches the "i1 true" condition will be returned.
///
/// @param TI The terminator to get the condition from.
///
/// @return The condition of @p TI, or nullptr if none could be extracted.
llvm::Value *getConditionFromTerminator(llvm::Instruction *TI);

/// Get the smallest loop that contains @p S but is not in @p S.
llvm::Loop *getLoopSurroundingScop(Scop &S, llvm::LoopInfo &LI);

/// Get the number of blocks in @p L.
///
/// The number of blocks in a loop is the number of basic blocks actually
/// belonging to the loop, as well as all single basic blocks that the loop
/// exits to and which terminate in an unreachable instruction. We do not
/// allow such basic blocks in the exit of a scop, hence they belong to the
/// scop and represent run-time conditions which we want to model and
/// subsequently speculate away.
///
/// @see getRegionNodeLoop for additional details.
unsigned getNumBlocksInLoop(llvm::Loop *L);

/// Get the number of blocks in @p RN.
unsigned getNumBlocksInRegionNode(llvm::RegionNode *RN);

/// Return the smallest loop surrounding @p RN.
llvm::Loop *getRegionNodeLoop(llvm::RegionNode *RN, llvm::LoopInfo &LI);

/// Check if @p LInst can be hoisted in @p R.
///
/// @param LInst The load to check.
/// @param R     The analyzed region.
/// @param LI    The loop info.
/// @param SE    The scalar evolution analysis.
/// @param DT    The dominator tree of the function.
/// @param KnownInvariantLoads The invariant load set.
///
/// @return True if @p LInst can be hoisted in @p R.
bool isHoistableLoad(llvm::LoadInst *LInst, llvm::Region &R, llvm::LoopInfo &LI,
                     llvm::ScalarEvolution &SE, const llvm::DominatorTree &DT,
                     const InvariantLoadsSetTy &KnownInvariantLoads);
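
// A hedged usage sketch (illustrative; what the caller does with a hoistable
// load is up to the caller, e.g. remembering it as an invariant load):
//
//     if (isHoistableLoad(LInst, R, LI, SE, DT, KnownInvariantLoads))
//       KnownInvariantLoads.insert(LInst);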

/// Return true iff @p V is an intrinsic that we ignore during code
/// generation.
bool isIgnoredIntrinsic(const llvm::Value *V);

/// Check whether a value can be synthesized by the code generator.
///
/// Some values are recalculated solely from information that is code generated
/// from the polyhedral representation. For such instructions we do not need to
/// ensure that their operands are available during code generation.
///
/// @param V The value to check.
/// @param S The current SCoP.
/// @param SE The scalar evolution database.
/// @param Scope Location where the value would be synthesized.
/// @return True if @p V can be regenerated from its scalar evolution
///         representation, false otherwise.
bool canSynthesize(const llvm::Value *V, const Scop &S,
                   llvm::ScalarEvolution *SE, llvm::Loop *Scope);
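
// A hedged sketch of how a caller might use this (names are hypothetical;
// handleScalarDependence stands in for whatever the caller does with values
// that must be made available explicitly):
//
//     if (!canSynthesize(V, S, &SE, Scope))
//       handleScalarDependence(V); // cannot be recomputed from SCEV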

/// Return the block in which a value is used.
///
/// For normal instructions, this is the instruction's parent block. For PHI
/// nodes, this is the incoming block of that use, because this is where the
/// operand must be defined (i.e. its definition dominates this block).
/// Non-instructions do not use operands at a specific point; in that case,
/// this function returns nullptr.
llvm::BasicBlock *getUseBlock(const llvm::Use &U);

/// If the loop is nonaffine/boxed, return the first non-boxed surrounding loop
/// for Polly. If the loop is affine, return the loop itself.
///
/// @param L             Pointer to the Loop object to analyze.
/// @param LI            Reference to the LoopInfo.
/// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::Loop *L, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

/// If the Basic Block belongs to a loop that is nonaffine/boxed, return the
/// first non-boxed surrounding loop for Polly. If the loop is affine, return
/// the loop itself.
///
/// @param BB            Pointer to the Basic Block to analyze.
/// @param LI            Reference to the LoopInfo.
/// @param BoxedLoops    Set of Boxed Loops we get from the SCoP.
llvm::Loop *getFirstNonBoxedLoopFor(llvm::BasicBlock *BB, llvm::LoopInfo &LI,
                                    const BoxedLoopsSetTy &BoxedLoops);

/// Is the given instruction a call to a debug function?
///
/// A debug function can be used to insert output in Polly-optimized code which
/// normally does not allow function calls with side-effects. For instance, a
/// printf can be inserted to check whether a value still has the expected value
/// after Polly generated code:
///
///     int sum = 0;
///     for (int i = 0; i < 16; i+=1) {
///       sum += i;
///       printf("The value of sum at i=%d is %d\n", i, sum);
///     }
bool isDebugCall(llvm::Instruction *Inst);

/// Does the statement contain a call to a debug function?
///
/// Such a statement must not be removed, even if it has no side-effects.
bool hasDebugCall(ScopStmt *Stmt);

/// Find a property value in a LoopID.
///
/// Generally, a property MDNode has the format
///
///   !{ !"Name", value }
///
/// in which case the value is returned.
///
/// If the property is just
///
///   !{ !"Name" }
///
/// then `nullptr` is returned to mark that the property exists but does not
/// carry any value. If the property does not exist, `std::nullopt` is returned.
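///
/// A hedged usage sketch (the property name is only an example):
///
///     std::optional<llvm::Metadata *> MD =
///         findMetadataOperand(LoopMD, "llvm.loop.vectorize.enable");
///     if (MD) {
///       // Property exists; *MD may still be nullptr if it carries no value.
///     }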
std::optional<llvm::Metadata *> findMetadataOperand(llvm::MDNode *LoopMD,
                                                    llvm::StringRef Name);

/// Find a boolean property value in a LoopID. If the property is not defined,
/// it is interpreted as false.
bool getBooleanLoopAttribute(llvm::MDNode *LoopID, llvm::StringRef Name);

/// Find an integer property value in a LoopID.
std::optional<int> getOptionalIntLoopAttribute(llvm::MDNode *LoopID,
                                               llvm::StringRef Name);

/// Does the loop's LoopID contain a 'llvm.loop.disable_heuristics' property?
///
/// This is equivalent to llvm::hasDisableAllTransformsHint(Loop*), but
/// including the LoopUtils.h header indirectly also declares llvm::MemoryAccess
/// which clashes with polly::MemoryAccess. Declaring this alias here avoids
/// having to include LoopUtils.h in other files.
bool hasDisableAllTransformsHint(llvm::Loop *L);
bool hasDisableAllTransformsHint(llvm::MDNode *LoopID);

/// Represent the attributes of a loop.
struct BandAttr {
  /// LoopID which stores the properties of the loop, such as transformations to
  /// apply and the metadata of followup-loops.
  ///
  /// Cannot be used to identify a loop. Two different loops can have the same
  /// metadata.
  llvm::MDNode *Metadata = nullptr;

  /// The LoopInfo reference for this loop.
  ///
  /// Only loops from the original IR are represented by LoopInfo. Loops that
  /// were generated by Polly are not tracked by LoopInfo.
  llvm::Loop *OriginalLoop = nullptr;
};

/// Get an isl::id representing a loop.
///
/// This takes ownership of the BandAttr, which will be freed when the returned
/// isl::id is freed.
isl::id getIslLoopAttr(isl::ctx Ctx, BandAttr *Attr);

/// Create an isl::id that identifies an original loop.
///
/// Return a null isl::id if the loop does not need a BandAttr (i.e. has no
/// properties).
///
/// This creates a BandAttr, which must be unique per loop; therefore, this
/// function must not be called multiple times on the same loop, as the
/// resulting ids would differ.
isl::id createIslLoopAttr(isl::ctx Ctx, llvm::Loop *L);

/// Is @p Id representing a loop?
///
/// Such ids contain a polly::BandAttr as their user pointer.
bool isLoopAttr(const isl::id &Id);

/// Return the BandAttr of a loop's isl::id.
BandAttr *getLoopAttr(const isl::id &Id);
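
// A hedged end-to-end sketch (illustrative only; Ctx and L come from the
// caller's context):
//
//     isl::id Id = createIslLoopAttr(Ctx, L);
//     if (!Id.is_null() && isLoopAttr(Id)) {
//       BandAttr *Attr = getLoopAttr(Id); // metadata and original loop of L
//     }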

} // namespace polly
#endif