//===- AffineOps.h - MLIR Affine Operations ------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines convenience types for working with Affine operations
// in the MLIR operation set.
//
//===----------------------------------------------------------------------===//

#ifndef MLIR_DIALECT_AFFINE_IR_AFFINEOPS_H
#define MLIR_DIALECT_AFFINE_IR_AFFINEOPS_H

#include "mlir/Dialect/Affine/IR/AffineMemoryOpInterfaces.h"
#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/Dialect/Utils/StaticValueUtils.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/Builders.h"
#include "mlir/Interfaces/ControlFlowInterfaces.h"
#include "mlir/Interfaces/LoopLikeInterface.h"

namespace mlir {
namespace affine {

class AffineApplyOp;
class AffineBound;
class AffineMaxOp;
class AffineMinOp;
class AffineValueMap;

/// A utility function to check if a value is defined at the top level of an
/// op with trait `AffineScope` or is a region argument for such an op. A value
/// of index type defined at the top level is always a valid symbol for all its
/// uses.
bool isTopLevelValue(Value value);

/// A utility function to check if a value is defined at the top level of
/// `region` or is an argument of `region`. A value of index type defined at
/// the top level of an `AffineScope` region is always a valid symbol for all
/// uses in that region.
bool isTopLevelValue(Value value, Region *region);

/// Returns the closest region enclosing `op` that is held by an operation with
/// trait `AffineScope`; returns `nullptr` if there is no such region.
Region *getAffineScope(Operation *op);

/// AffineDmaStartOp starts a non-blocking DMA operation that transfers data
/// from a source memref to a destination memref. The source and destination
/// memrefs need not have the same dimensionality, but they must have the same
/// element type. The operands include the source and destination memrefs,
/// each followed by its indices, the size of the data transfer in terms of the
/// number of elements (of the element type of the memref), a tag memref with
/// its indices, and, optionally at the end, a stride and a
/// number_of_elements_per_stride argument. The tag location is used by an
/// AffineDmaWaitOp to check for completion. The indices of the source memref,
/// destination memref, and tag memref have the same restrictions as any
/// affine.load/store. In particular, the index for each memref dimension must
/// be an affine expression of loop induction variables and symbols.
/// The optional stride arguments should be of 'index' type and specify a
/// stride for the slower memory space (the memory space with the lower memory
/// space id), transferring chunks of number_of_elements_per_stride every
/// stride until %num_elements are transferred. Either both or neither of the
/// stride arguments should be specified. The value of 'num_elements' must be a
/// multiple of 'number_of_elements_per_stride'. If the source and destination
/// locations overlap, the behavior of this operation is not defined.
//
// For example, an AffineDmaStartOp operation that transfers 256 elements of a
// memref '%src' in memory space 0 at indices [%i + 3, %j] to memref '%dst' in
// memory space 1 at indices [%k + 7, %l] would be specified as follows:
//
//   %num_elements = arith.constant 256 : index
//   %idx = arith.constant 0 : index
//   %tag = memref.alloc() : memref<1xi32, 2>
//   affine.dma_start %src[%i + 3, %j], %dst[%k + 7, %l], %tag[%idx],
//     %num_elements :
//       memref<40x128xf32, 0>, memref<2x1024xf32, 1>, memref<1xi32, 2>
//
// If %stride and %num_elt_per_stride are specified, the DMA is expected to
// transfer %num_elt_per_stride elements every %stride elements apart, from
// memory space 0, until %num_elements are transferred.
//
//   affine.dma_start %src[%i, %j], %dst[%k, %l], %tag[%idx], %num_elements,
//     %stride, %num_elt_per_stride : ...
//
// TODO: add additional operands to allow source and destination striding, and
// multiple stride levels (possibly using AffineMaps to specify multiple levels
// of striding).
class AffineDmaStartOp
    : public Op<AffineDmaStartOp, OpTrait::MemRefsNormalizable,
                OpTrait::VariadicOperands, OpTrait::ZeroResults,
                OpTrait::OpInvariants, AffineMapAccessInterface::Trait,
                MemoryEffectOpInterface::Trait> {
public:
  using Op::Op;
  static ArrayRef<StringRef> getAttributeNames() { return {}; }

  static void build(OpBuilder &builder, OperationState &result,
                    Value srcMemRef, AffineMap srcMap, ValueRange srcIndices,
                    Value destMemRef, AffineMap dstMap, ValueRange destIndices,
                    Value tagMemRef, AffineMap tagMap, ValueRange tagIndices,
                    Value numElements, Value stride = nullptr,
                    Value elementsPerStride = nullptr);

  /// Returns the operand index of the source memref.
  unsigned getSrcMemRefOperandIndex() { return 0; }

  /// Returns the source memref for this DMA operation.
  Value getSrcMemRef() { return getOperand(getSrcMemRefOperandIndex()); }
  OpOperand &getSrcMemRefMutable() {
    return getOperation()->getOpOperand(getSrcMemRefOperandIndex());
  }
  MemRefType getSrcMemRefType() {
    return cast<MemRefType>(getSrcMemRef().getType());
  }

  /// Returns the rank (number of indices) of the source MemRefType.
  unsigned getSrcMemRefRank() { return getSrcMemRefType().getRank(); }

  /// Returns the affine map used to access the source memref.
  AffineMap getSrcMap() { return getSrcMapAttr().getValue(); }
  AffineMapAttr getSrcMapAttr() {
    return cast<AffineMapAttr>(
        *(*this)->getInherentAttr(getSrcMapAttrStrName()));
  }

  /// Returns the source memref affine map indices for this DMA operation.
  operand_range getSrcIndices() {
    return {operand_begin() + getSrcMemRefOperandIndex() + 1,
            operand_begin() + getSrcMemRefOperandIndex() + 1 +
                getSrcMap().getNumInputs()};
  }

  /// Returns the memory space of the source memref.
  unsigned getSrcMemorySpace() {
    return cast<MemRefType>(getSrcMemRef().getType()).getMemorySpaceAsInt();
  }

  /// Returns the operand index of the destination memref.
  unsigned getDstMemRefOperandIndex() {
    return getSrcMemRefOperandIndex() + 1 + getSrcMap().getNumInputs();
  }

  /// Returns the destination memref for this DMA operation.
  Value getDstMemRef() { return getOperand(getDstMemRefOperandIndex()); }
  OpOperand &getDstMemRefMutable() {
    return getOperation()->getOpOperand(getDstMemRefOperandIndex());
  }
  MemRefType getDstMemRefType() {
    return cast<MemRefType>(getDstMemRef().getType());
  }

  /// Returns the rank (number of indices) of the destination MemRefType.
  unsigned getDstMemRefRank() {
    return cast<MemRefType>(getDstMemRef().getType()).getRank();
  }

  /// Returns the memory space of the destination memref.
  unsigned getDstMemorySpace() {
    return cast<MemRefType>(getDstMemRef().getType()).getMemorySpaceAsInt();
  }

  /// Returns the affine map used to access the destination memref.
  AffineMap getDstMap() { return getDstMapAttr().getValue(); }
  AffineMapAttr getDstMapAttr() {
    return cast<AffineMapAttr>(
        *(*this)->getInherentAttr(getDstMapAttrStrName()));
  }

  /// Returns the destination memref indices for this DMA operation.
  operand_range getDstIndices() {
    return {operand_begin() + getDstMemRefOperandIndex() + 1,
            operand_begin() + getDstMemRefOperandIndex() + 1 +
                getDstMap().getNumInputs()};
  }

  /// Returns the operand index of the tag memref.
  unsigned getTagMemRefOperandIndex() {
    return getDstMemRefOperandIndex() + 1 + getDstMap().getNumInputs();
  }

  /// Returns the tag memref for this DMA operation.
  Value getTagMemRef() { return getOperand(getTagMemRefOperandIndex()); }
  OpOperand &getTagMemRefMutable() {
    return getOperation()->getOpOperand(getTagMemRefOperandIndex());
  }
  MemRefType getTagMemRefType() {
    return cast<MemRefType>(getTagMemRef().getType());
  }

  /// Returns the rank (number of indices) of the tag MemRefType.
  unsigned getTagMemRefRank() {
    return cast<MemRefType>(getTagMemRef().getType()).getRank();
  }

  /// Returns the affine map used to access the tag memref.
  AffineMap getTagMap() { return getTagMapAttr().getValue(); }
  AffineMapAttr getTagMapAttr() {
    return cast<AffineMapAttr>(
        *(*this)->getInherentAttr(getTagMapAttrStrName()));
  }

  /// Returns the tag memref indices for this DMA operation.
  operand_range getTagIndices() {
    return {operand_begin() + getTagMemRefOperandIndex() + 1,
            operand_begin() + getTagMemRefOperandIndex() + 1 +
                getTagMap().getNumInputs()};
  }

  /// Returns the number of elements being transferred by this DMA operation.
  Value getNumElements() {
    return getOperand(getTagMemRefOperandIndex() + 1 +
                      getTagMap().getNumInputs());
  }

  /// Implements the AffineMapAccessInterface.
  /// Returns the AffineMapAttr associated with 'memref'.
  NamedAttribute getAffineMapAttrForMemRef(Value memref) {
    if (memref == getSrcMemRef())
      return {StringAttr::get(getContext(), getSrcMapAttrStrName()),
              getSrcMapAttr()};
    if (memref == getDstMemRef())
      return {StringAttr::get(getContext(), getDstMapAttrStrName()),
              getDstMapAttr()};
    assert(memref == getTagMemRef() &&
           "DmaStartOp expected source, destination or tag memref");
    return {StringAttr::get(getContext(), getTagMapAttrStrName()),
            getTagMapAttr()};
  }

  /// Returns true if this is a DMA from a slower memory space to a faster one,
  /// i.e., the destination memref lives in the faster space.
  bool isDestMemorySpaceFaster() {
    return (getSrcMemorySpace() < getDstMemorySpace());
  }

  /// Returns true if this is a DMA from a faster memory space to a slower one,
  /// i.e., the source memref lives in the faster space.
  bool isSrcMemorySpaceFaster() {
    // Assumes that a lower number is for a slower memory space.
    return (getDstMemorySpace() < getSrcMemorySpace());
  }

  /// Given a DMA start operation, returns the operand position of either the
  /// source or destination memref, whichever is at the higher level of the
  /// memory hierarchy. Asserts if neither memory space is faster than the
  /// other.
  unsigned getFasterMemPos() {
    assert(isSrcMemorySpaceFaster() || isDestMemorySpaceFaster());
    return isSrcMemorySpaceFaster() ? 0 : getDstMemRefOperandIndex();
  }

  void
  getEffects(SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
                 &effects);

  static StringRef getSrcMapAttrStrName() { return "src_map"; }
  static StringRef getDstMapAttrStrName() { return "dst_map"; }
  static StringRef getTagMapAttrStrName() { return "tag_map"; }

  static StringRef getOperationName() { return "affine.dma_start"; }
  static ParseResult parse(OpAsmParser &parser, OperationState &result);
  void print(OpAsmPrinter &p);
  LogicalResult verifyInvariantsImpl();
  LogicalResult verifyInvariants() { return verifyInvariantsImpl(); }
  LogicalResult fold(ArrayRef<Attribute> cstOperands,
                     SmallVectorImpl<OpFoldResult> &results);

  /// Returns true if this DMA operation is strided, false otherwise.
  bool isStrided() {
    return getNumOperands() !=
           getTagMemRefOperandIndex() + 1 + getTagMap().getNumInputs() + 1;
  }

  /// Returns the stride value for this DMA operation.
  Value getStride() {
    if (!isStrided())
      return nullptr;
    return getOperand(getNumOperands() - 1 - 1);
  }

  /// Returns the number of elements to transfer per stride for this DMA op.
  Value getNumElementsPerStride() {
    if (!isStrided())
      return nullptr;
    return getOperand(getNumOperands() - 1);
  }
};
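
// Illustrative usage sketch (not part of the op's interface; `dmaOp` is a
// hypothetical AffineDmaStartOp value): the accessors above expose the flat
// operand list of affine.dma_start, which is laid out as
//   source memref, source indices, destination memref, destination indices,
//   tag memref, tag indices, %num_elements[, %stride, %num_elt_per_stride].
// A transform that needs the optional striding operands could query:
//
//   if (dmaOp.isStrided()) {
//     Value stride = dmaOp.getStride();
//     Value elementsPerStride = dmaOp.getNumElementsPerStride();
//     // ... use stride / elementsPerStride ...
//   }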

/// AffineDmaWaitOp blocks until the completion of a DMA operation associated
/// with the tag element '%tag[%index]'. %tag is a memref, and %index has to
/// be an index with the same restrictions as any load/store index. In
/// particular, the index for each memref dimension must be an affine
/// expression of loop induction variables and symbols. %num_elements is the
/// number of elements associated with the DMA operation. For example:
//
//   affine.dma_start %src[%i, %j], %dst[%k, %l], %tag[%index], %num_elements :
//     memref<2048xf32, 0>, memref<256xf32, 1>, memref<1xi32, 2>
//   ...
//   ...
//   affine.dma_wait %tag[%index], %num_elements : memref<1xi32, 2>
//
class AffineDmaWaitOp
    : public Op<AffineDmaWaitOp, OpTrait::MemRefsNormalizable,
                OpTrait::VariadicOperands, OpTrait::ZeroResults,
                OpTrait::OpInvariants, AffineMapAccessInterface::Trait> {
public:
  using Op::Op;
  static ArrayRef<StringRef> getAttributeNames() { return {}; }

  static void build(OpBuilder &builder, OperationState &result,
                    Value tagMemRef, AffineMap tagMap, ValueRange tagIndices,
                    Value numElements);

  static StringRef getOperationName() { return "affine.dma_wait"; }

  /// Returns the tag memref associated with the DMA operation being waited on.
  Value getTagMemRef() { return getOperand(0); }
  OpOperand &getTagMemRefMutable() { return getOperation()->getOpOperand(0); }
  MemRefType getTagMemRefType() {
    return cast<MemRefType>(getTagMemRef().getType());
  }

  /// Returns the affine map used to access the tag memref.
  AffineMap getTagMap() { return getTagMapAttr().getValue(); }
  AffineMapAttr getTagMapAttr() {
    return cast<AffineMapAttr>(
        *(*this)->getInherentAttr(getTagMapAttrStrName()));
  }

  /// Returns the tag memref indices for this DMA operation.
  operand_range getTagIndices() {
    return {operand_begin() + 1,
            operand_begin() + 1 + getTagMap().getNumInputs()};
  }

  /// Returns the rank (number of indices) of the tag memref.
  unsigned getTagMemRefRank() {
    return cast<MemRefType>(getTagMemRef().getType()).getRank();
  }

  /// Implements the AffineMapAccessInterface. Returns the AffineMapAttr
  /// associated with 'memref'.
  NamedAttribute getAffineMapAttrForMemRef(Value memref) {
    assert(memref == getTagMemRef());
    return {StringAttr::get(getContext(), getTagMapAttrStrName()),
            getTagMapAttr()};
  }

  /// Returns the number of elements transferred by the associated DMA op.
  Value getNumElements() { return getOperand(1 + getTagMap().getNumInputs()); }

  static StringRef getTagMapAttrStrName() { return "tag_map"; }
  static ParseResult parse(OpAsmParser &parser, OperationState &result);
  void print(OpAsmPrinter &p);
  LogicalResult verifyInvariantsImpl();
  LogicalResult verifyInvariants() { return verifyInvariantsImpl(); }
  LogicalResult fold(ArrayRef<Attribute> cstOperands,
                     SmallVectorImpl<OpFoldResult> &results);
  void
  getEffects(SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
                 &effects);
};

/// Returns true if the given Value can be used as a dimension id in the region
/// of the closest surrounding op that has the trait `AffineScope`.
bool isValidDim(Value value);

/// Returns true if the given Value can be used as a dimension id in `region`,
/// i.e., for all its uses in `region`.
bool isValidDim(Value value, Region *region);

/// Returns true if the given value can be used as a symbol in the region of
/// the closest surrounding op that has the trait `AffineScope`.
bool isValidSymbol(Value value);

/// Returns true if the given Value can be used as a symbol for `region`, i.e.,
/// for all its uses in `region`.
bool isValidSymbol(Value value, Region *region);

/// Parses a dimension and symbol list. `numDims` is set to the number of
/// dimensions in the list parsed.
ParseResult parseDimAndSymbolList(OpAsmParser &parser,
                                  SmallVectorImpl<Value> &operands,
                                  unsigned &numDims);

/// Modifies both `map` and `operands` in-place so as to:
/// 1. drop duplicate operands
/// 2. drop unused dims and symbols from `map`
/// 3. promote valid symbols to symbolic operands in case they appeared as
///    dimensional operands
/// 4. propagate constant operands and drop them
void canonicalizeMapAndOperands(AffineMap *map,
                                SmallVectorImpl<Value> *operands);

/// Canonicalizes an integer set the same way canonicalizeMapAndOperands does
/// for affine maps.
void canonicalizeSetAndOperands(IntegerSet *set,
                                SmallVectorImpl<Value> *operands);
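
// A minimal before/after sketch of canonicalizeMapAndOperands (the values %i
// and %c4 are hypothetical; %c4 is assumed to be `arith.constant 4 : index`
// and %i a loop IV):
//
//   before:  map      = affine_map<(d0, d1)[s0] -> (d0 + d1 + s0)>
//            operands = [%i, %i, %c4]
//   after:   map      = affine_map<(d0) -> (d0 * 2 + 4)>
//            operands = [%i]
//
// The duplicate %i collapses into a single dimension, the constant symbol is
// folded into the map and dropped, and unused dims and symbols are removed.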

/// Returns a composed AffineApplyOp by composing `map` and `operands` with
/// other AffineApplyOps supplying those operands. The operands of the
/// resulting AffineApplyOp do not increase the length of AffineApplyOp chains.
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineMap map,
                                      ArrayRef<OpFoldResult> operands);
AffineApplyOp makeComposedAffineApply(OpBuilder &b, Location loc, AffineExpr e,
                                      ArrayRef<OpFoldResult> operands);

/// Constructs an AffineApplyOp that applies `map` to `operands` after
/// composing the map with the maps of any other AffineApplyOps supplying the
/// operands, then immediately attempts to fold it. If folding results in a
/// constant value, no ops are actually created. The `map` must be a
/// single-result affine map.
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
                                           AffineMap map,
                                           ArrayRef<OpFoldResult> operands);
/// Variant of `makeComposedFoldedAffineApply` that applies to an expression.
OpFoldResult makeComposedFoldedAffineApply(OpBuilder &b, Location loc,
                                           AffineExpr expr,
                                           ArrayRef<OpFoldResult> operands);
/// Variant of `makeComposedFoldedAffineApply` suitable for multi-result maps.
/// Note that this may create as many affine.apply operations as the map has
/// results, given that affine.apply must be single-result.
SmallVector<OpFoldResult> makeComposedFoldedMultiResultAffineApply(
    OpBuilder &b, Location loc, AffineMap map, ArrayRef<OpFoldResult> operands);

/// Returns an AffineMinOp obtained by composing `map` and `operands` with
/// AffineApplyOps supplying those operands.
AffineMinOp makeComposedAffineMin(OpBuilder &b, Location loc, AffineMap map,
                                  ArrayRef<OpFoldResult> operands);

/// Constructs an AffineMinOp that computes a minimum across the results of
/// applying `map` to `operands`, then immediately attempts to fold it. If
/// folding results in a constant value, no ops are actually created.
OpFoldResult makeComposedFoldedAffineMin(OpBuilder &b, Location loc,
                                         AffineMap map,
                                         ArrayRef<OpFoldResult> operands);

/// Constructs an AffineMaxOp that computes a maximum across the results of
/// applying `map` to `operands`, then immediately attempts to fold it. If
/// folding results in a constant value, no ops are actually created.
OpFoldResult makeComposedFoldedAffineMax(OpBuilder &b, Location loc,
                                         AffineMap map,
                                         ArrayRef<OpFoldResult> operands);

/// Given an affine map `map` and its input `operands`, this method composes
/// into `map`, maps of AffineApplyOps whose results are the values in
/// `operands`, iteratively until no more of `operands` are the result of an
/// AffineApplyOp. When this function returns, `map` becomes the composed
/// affine map, and each Value in `operands` is guaranteed to be either a loop
/// IV or a terminal symbol, i.e., a symbol defined at the top level or a
/// block/function argument.
void fullyComposeAffineMapAndOperands(AffineMap *map,
                                      SmallVectorImpl<Value> *operands);
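
// A minimal sketch of the composition performed above (the values %i and %v
// are hypothetical):
//
//   %v = affine.apply affine_map<(d0) -> (d0 + 1)>(%i)
//
// Given map = affine_map<(d0) -> (d0 * 2)> and operands = [%v],
// fullyComposeAffineMapAndOperands rewrites them to
//
//   map      = affine_map<(d0) -> (d0 * 2 + 2)>
//   operands = [%i]
//
// so that no operand is itself the result of an affine.apply. The
// makeComposed* helpers above apply the same composition before building or
// folding the resulting op.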

} // namespace affine
} // namespace mlir

#include "mlir/Dialect/Affine/IR/AffineOpsDialect.h.inc"

#define GET_OP_CLASSES
#include "mlir/Dialect/Affine/IR/AffineOps.h.inc"

namespace mlir {
namespace affine {

/// Returns true if the provided value is the induction variable of an
/// AffineForOp.
bool isAffineForInductionVar(Value val);

/// Returns true if `val` is an induction variable of an AffineParallelOp.
bool isAffineParallelInductionVar(Value val);

/// Returns true if the provided value is the induction variable of an
/// AffineForOp or AffineParallelOp.
bool isAffineInductionVar(Value val);

/// Returns the loop parent of an induction variable. If the provided value is
/// not an induction variable, returns a null AffineForOp.
AffineForOp getForInductionVarOwner(Value val);

/// Returns the AffineParallelOp that has the provided value among its
/// induction variables; returns a null AffineParallelOp if the value is not a
/// parallel induction variable.
AffineParallelOp getAffineParallelInductionVarOwner(Value val);

/// Extracts the induction variables from a list of AffineForOps and places
/// them in the output argument `ivs`.
void extractForInductionVars(ArrayRef<AffineForOp> forInsts,
                             SmallVectorImpl<Value> *ivs);

/// Extracts the induction variables from a list of either AffineForOp or
/// AffineParallelOp and places them in the output argument `ivs`.
void extractInductionVars(ArrayRef<Operation *> affineOps,
                          SmallVectorImpl<Value> &ivs);

/// Builds a perfect nest of affine.for loops, i.e., each loop except the
/// innermost one contains only another loop and a terminator. The loops
/// iterate from "lbs" to "ubs" with "steps". The body of the innermost loop
/// is populated by calling "bodyBuilderFn" and providing it with an OpBuilder,
/// a Location and a list of loop induction variables.
void buildAffineLoopNest(OpBuilder &builder, Location loc,
                         ArrayRef<int64_t> lbs, ArrayRef<int64_t> ubs,
                         ArrayRef<int64_t> steps,
                         function_ref<void(OpBuilder &, Location, ValueRange)>
                             bodyBuilderFn = nullptr);
void buildAffineLoopNest(OpBuilder &builder, Location loc, ValueRange lbs,
                         ValueRange ubs, ArrayRef<int64_t> steps,
                         function_ref<void(OpBuilder &, Location, ValueRange)>
                             bodyBuilderFn = nullptr);

/// AffineBound represents a lower or upper bound in the for operation.
/// This class does not own the underlying operands. Instead, it refers
/// to the operands stored in the AffineForOp. Its life span should not exceed
/// that of the for operation it refers to.
class AffineBound {
public:
  AffineForOp getAffineForOp() { return op; }
  AffineMap getMap() { return map; }

  unsigned getNumOperands() { return operands.size(); }
  Value getOperand(unsigned idx) {
    return op.getOperand(operands.getBeginOperandIndex() + idx);
  }

  using operand_iterator = AffineForOp::operand_iterator;
  using operand_range = AffineForOp::operand_range;

  operand_iterator operandBegin() { return operands.begin(); }
  operand_iterator operandEnd() { return operands.end(); }
  operand_range getOperands() { return {operandBegin(), operandEnd()}; }

private:
  // 'affine.for' operation that contains this bound.
  AffineForOp op;
  // Operands of the affine map.
  OperandRange operands;
  // Affine map for this bound.
  AffineMap map;

  AffineBound(AffineForOp op, OperandRange operands, AffineMap map)
      : op(op), operands(operands), map(map) {}

  friend class AffineForOp;
};

} // namespace affine
} // namespace mlir

#endif // MLIR_DIALECT_AFFINE_IR_AFFINEOPS_H