/llvm-project/mlir/lib/Dialect/Affine/Transforms/

  LoopUnroll.cpp
     87  f.walk([&](AffineForOp forOp) {   (in gatherInnermostLoops())
    105  getOperation().walk([&](AffineForOp forOp) {   (in runOnOperation())
    110  for (auto forOp : loops)   (in runOnOperation(), local)
    123  for (auto forOp : loops)   (in runOnOperation(), local)
    133  LogicalResult LoopUnroll::runOnAffineForOp(AffineForOp forOp) {   (in runOnAffineForOp())

  PipelineDataTransfer.cpp
     77  static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {   (in doubleBuffer(), argument)
    145  __anon46c09dec0302(AffineForOp forOp)   (in runOnOperation(), argument)
    146  for (auto forOp : forOps)   (in runOnOperation(), local)
    175  findMatchingStartFinishInsts(AffineForOp forOp, SmallVectorImpl<std::pair<Operation *, Operation *>> &startWaitPairs)   (in findMatchingStartFinishInsts(), argument)
    246  runOnAffineForOp(AffineForOp forOp)   (in runOnAffineForOp(), argument)
    [all...]

  AffineDataCopyGeneration.cpp
    138  AffineForOp forOp;   (in runOnBlock(), local)
    146  auto exceedsCapacity = [&](AffineForOp forOp) {   (in runOnBlock(), argument)
    227  if (auto forOp = dyn_cast<AffineForOp>(op))   (in runOnOperation(), local)
    [all...]

  LoopUnrollAndJam.cpp
     93  if (auto forOp = dyn_cast<AffineForOp>(entryBlock.front()))   (in runOnOperation(), local)

  AffineLoopInvariantCodeMotion.cpp
     87  } else if (auto forOp = dyn_cast<AffineForOp>(op)) {   (in isOpLoopInvariant(), local)
    204  runOnAffineForOp(AffineForOp forOp)   (in runOnAffineForOp(), argument)
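The unroll-style passes above share one driver pattern: walk the pass root for AffineForOp loops, collect them, then hand each loop to a LoopUtils entry point. A minimal sketch of that pattern, assuming the mlir::affine loopUnrollByFactor API from Affine/LoopUtils.h (the include paths and the fixed factor of 4 are illustrative):

    #include "mlir/Dialect/Affine/IR/AffineOps.h"
    #include "mlir/Dialect/Affine/LoopUtils.h"
    #include "llvm/ADT/SmallVector.h"

    using namespace mlir;
    using namespace mlir::affine;

    // Collect every affine.for under `root`, then try to unroll each one by 4.
    // Collecting first avoids mutating the IR while walking it.
    static void unrollAllLoopsByFour(Operation *root) {
      SmallVector<AffineForOp> loops;
      root->walk([&](AffineForOp forOp) { loops.push_back(forOp); });
      for (AffineForOp forOp : loops) {
        // Fails (harmlessly) when the loop cannot be unrolled by this factor,
        // e.g. a known trip count smaller than 4.
        (void)loopUnrollByFactor(forOp, /*unrollFactor=*/4);
      }
    }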
/llvm-project/mlir/lib/Dialect/SCF/Transforms/

  LoopCanonicalization.cpp
     38  static bool isShapePreserving(ForOp forOp, int64_t arg) {   (in isShapePreserving(), argument)
     53  .template Case<ForOp>([&](ForOp forOp) {   (in isShapePreserving(), argument)
     95  auto forOp = dyn_cast<ForOp>(blockArg.getParentBlock()->getParentOp());   (in matchAndRewrite(), local)
    137  auto forOp = dimOp.getSource().template getDefiningOp<scf::ForOp>();   (in matchAndRewrite(), local)
    [all...]

  LoopSpecialization.cpp
    119  static LogicalResult peelForLoop(RewriterBase &b, ForOp forOp,   (in peelForLoop(), argument)
    169  rewriteAffineOpAfterPeeling(RewriterBase &rewriter, ForOp forOp, ForOp partialIteration, Value previousUb)   (in rewriteAffineOpAfterPeeling(), argument)
    196  peelForLoopAndSimplifyBounds(RewriterBase &rewriter, ForOp forOp, ForOp &partialIteration)   (in peelForLoopAndSimplifyBounds(), argument)
    215  peelForLoopFirstIteration(RewriterBase &b, ForOp forOp, ForOp &firstIteration)   (in peelForLoopFirstIteration(), argument)
    [all...]

  BufferizableOpInterfaceImpl.cpp
    581  mayHaveZeroIterations(scf::ForOp forOp)   (in mayHaveZeroIterations(), argument)
    596  auto forOp = cast<scf::ForOp>(op);   (in bufferizesToMemoryRead(), local)
    616  auto forOp = cast<scf::ForOp>(op);   (in getAliasingValues(), local)
    627  auto forOp = cast<scf::ForOp>(op);   (in bufferRelation(), local)
    660  auto forOp = cast<scf::ForOp>(op);   (in resolveConflicts(), local)
    699  auto forOp = cast<scf::ForOp>(op);   (in getBufferType(), local)
    724  auto forOp = cast<scf::ForOp>(op);   (in bufferize(), local)
    791  auto forOp = cast<scf::ForOp>(op);   (in verifyAnalysis(), local)
    [all...]
/llvm-project/mlir/include/mlir/Dialect/SCF/Transforms/

  Patterns.h
     29  LogicalResult matchAndRewrite(ForOp forOp,   (in matchAndRewrite(), argument)
     34  FailureOr<ForOp> returningMatchAndRewrite(ForOp forOp,   (in returningMatchAndRewrite(), argument)
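The peeling helpers listed for LoopSpecialization.cpp can be driven from an ordinary rewrite pattern, much like the matchAndRewrite/returningMatchAndRewrite hooks declared in Patterns.h. A minimal sketch, assuming scf::peelForLoopAndSimplifyBounds keeps the signature shown above (the pattern class and include paths are illustrative, not the upstream ForLoopPeelingPattern):

    #include "mlir/Dialect/SCF/IR/SCF.h"
    #include "mlir/Dialect/SCF/Transforms/Transforms.h"
    #include "mlir/IR/PatternMatch.h"

    using namespace mlir;

    // Split off the final, possibly partial iteration of an scf.for so the
    // main loop can assume a full step. `partialIteration` receives the
    // peeled copy of the loop.
    struct PeelLastIterationSketch : public OpRewritePattern<scf::ForOp> {
      using OpRewritePattern<scf::ForOp>::OpRewritePattern;

      LogicalResult matchAndRewrite(scf::ForOp forOp,
                                    PatternRewriter &rewriter) const override {
        scf::ForOp partialIteration;
        // Note: a greedy driver needs some marker (e.g. a unit attribute) on
        // already-peeled loops to keep this from reapplying indefinitely.
        return scf::peelForLoopAndSimplifyBounds(rewriter, forOp,
                                                 partialIteration);
      }
    };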
/llvm-project/mlir/lib/Conversion/SCFToGPU/

  SCFToGPU.cpp
     76  static Operation::operand_range getLowerBoundOperands(AffineForOp forOp) {   (in getLowerBoundOperands(), argument)
     81  static Operation::operand_range getUpperBoundOperands(AffineForOp forOp) {   (in getUpperBoundOperands(), argument)
     87  static Value getOrCreateStep(AffineForOp forOp, OpBuilder &builder) {   (in getOrCreateStep(), argument)
     94  static Value getOrEmitLowerBound(AffineForOp forOp, OpBuilder &builder) {   (in getOrEmitLowerBound(), argument)
    100  getOrEmitUpperBound(AffineForOp forOp, OpBuilde   (argument)
    110  checkAffineLoopNestMappableImpl(AffineForOp forOp, unsigned numDims)   (in checkAffineLoopNestMappableImpl(), argument)
    136  checkAffineLoopNestMappable(AffineForOp forOp, unsigned numBlockDims, unsigned numThreadDims)   (in checkAffineLoopNestMappable(), argument)
    180  collectBounds(AffineForOp forOp, unsigned numLoops)   (in collectBounds(), argument)
    281  convertAffineLoopNestToGPULaunch(AffineForOp forOp, unsigned numBlockDims, unsigned numThreadDims)   (in convertAffineLoopNestToGPULaunch(), argument)
    297  convertAffineLoopNestToGPULaunch(AffineForOp forOp, unsigned numBlockDims, unsigned numThreadDims)   (in convertAffineLoopNestToGPULaunch(), argument)
    [all...]

  SCFToGPUPass.cpp
     45  if (auto forOp = dyn_cast<affine::AffineForOp>(&op)) {   (in runOnOperation(), local)
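The public entry point of this conversion is the convertAffineLoopNestToGPULaunch call referenced at lines 281/297. A minimal sketch of driving it on a loop-nest root, assuming the declaration in mlir/Conversion/SCFToGPU/SCFToGPU.h and a 1 block dim / 1 thread dim mapping chosen only for illustration:

    #include "mlir/Conversion/SCFToGPU/SCFToGPU.h"
    #include "mlir/Dialect/Affine/IR/AffineOps.h"

    using namespace mlir;

    // Map a perfectly nested affine loop nest rooted at `root` onto gpu.launch:
    // the outermost loop becomes the block-id dimension, the next one the
    // thread-id dimension. Fails if the nest is not mappable (e.g. too shallow).
    static LogicalResult mapNestToGpuLaunch(affine::AffineForOp root) {
      return convertAffineLoopNestToGPULaunch(root, /*numBlockDims=*/1,
                                              /*numThreadDims=*/1);
    }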
/llvm-project/mlir/test/lib/Dialect/Affine/

  TestLoopPermutation.cpp
     54  getOperation()->walk([&](AffineForOp forOp) { forOps.push_back(forOp); });   (in runOnOperation())
     56  for (auto forOp : forOps) {   (in runOnOperation(), local)

  TestAccessAnalysis.cpp
     42  gatherLoadsAndStores(AffineForOp forOp,   (in gatherLoadsAndStores())
     55  for (auto forOp : getOperation().getOps<AffineForOp>()) {   (in runOnOperation(), local)

  TestAffineDataCopy.cpp
    115  if (auto forOp = dyn_cast<AffineForOp>(op))   (in runOnOperation(), local)
/llvm-project/mlir/lib/Dialect/Affine/Utils/

  LoopFusionUtils.cpp
    172  gatherLoadsAndStores(AffineForOp forOp,   (in gatherLoadsAndStores())
    358  static LogicalResult promoteSingleIterReductionLoop(AffineForOp forOp,   (in promoteSingleIterReductionLoop())
    440  auto forOp = getForInductionVarOwner(loopIV);   (in fuseLoops(), local)
    460  for (AffineForOp forOp : sliceLoops) {   (in fuseLoops(), local)
    477  auto walkResult = forOpRoot.walk([&](AffineForOp forOp) {   (in getLoopNestStats())
    529  Operation *forOp, LoopNestStats &stats,   (in getComputeCostHelper())
    564  int64_t mlir::affine::getComputeCost(AffineForOp forOp, LoopNestStats &stats) {   (in getComputeCost())
    617  if (auto forOp = dyn_cast_or_null<AffineForOp>(user->getParentOp())) {   (in getFusionComputeCost(), local)

  LoopUtils.cpp
    104  auto iterOperands = forOp   (argument)
     46  getCleanupLoopLowerBound(AffineForOp forOp, unsigned unrollFactor, AffineMap &cleanupLbMap, SmallVectorImpl<Value> &cleanupLbOperands)   (in getCleanupLoopLowerBound(), argument)
    120  promoteIfSingleIteration(AffineForOp forOp)   (in promoteIfSingleIteration(), argument)
    226  affineForOpBodySkew(AffineForOp forOp, ArrayRef<uint64_t> shifts, bool unrollPrologueEpilogue)   (in affineForOpBodySkew(), argument)
    882  loopUnrollFull(AffineForOp forOp)   (in loopUnrollFull(), argument)
    897  loopUnrollUpToFactor(AffineForOp forOp, uint64_t unrollFactor)   (in loopUnrollUpToFactor(), argument)
    970  generateCleanupLoopForUnroll(AffineForOp forOp, uint64_t unrollFactor)   (in generateCleanupLoopForUnroll(), argument)
   1006  loopUnrollByFactor(AffineForOp forOp, uint64_t unrollFactor, function_ref<void (unsigned, Operation *, OpBuilder)> annotateFn, bool cleanUpUnroll)   (in loopUnrollByFactor(), argument)
   1072  loopUnrollJamUpToFactor(AffineForOp forOp, uint64_t unrollJamFactor)   (in loopUnrollJamUpToFactor(), argument)
   1083  areInnerBoundsInvariant(AffineForOp forOp)   (in areInnerBoundsInvariant(), argument)
   1095  loopUnrollJamByFactor(AffineForOp forOp, uint64_t unrollJamFactor)   (in loopUnrollJamByFactor(), argument)
   1454  sinkSequentialLoops(AffineForOp forOp)   (in sinkSequentialLoops(), argument)
   1530  stripmineSink(AffineForOp forOp, uint64_t factor, ArrayRef<AffineForOp> targets)   (in stripmineSink(), argument)
   1573  stripmineSink(AffineForOp forOp, SizeType factor, AffineForOp target)   (in stripmineSink(), argument)
   1722  mapLoopToProcessorIds(scf::ForOp forOp, ArrayRef<Value> processorId, ArrayRef<Value> numProcessors)   (in mapLoopToProcessorIds(), argument)
   1864  auto forOp = createCanonicalizedAffineForOp(b, loc, lbOperands, lbMaps[d],   (in generatePointWiseCopy(), local)
   2437  AffineForOp forOp;   (in affineDataCopyGenerate(), local)
   2456  affineDataCopyGenerate(AffineForOp forOp, const AffineCopyOptions &copyOptions, std::optional<Value> filterMemRef, DenseSet<Operation *> &copyNests)   (in affineDataCopyGenerate(), argument)
   2499  if (auto forOp = dyn_cast<AffineForOp>(op)) {   (in gatherLoopsInBlock(), local)
    [all...]
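Several of these LoopUtils entry points are meant to be called directly from passes; affineDataCopyGenerate at line 2456 is a typical example. A minimal sketch of invoking it, assuming the AffineCopyOptions fields declared alongside it in Affine/LoopUtils.h (the memory-space numbers and 32 KiB capacity are placeholder values):

    #include "mlir/Dialect/Affine/IR/AffineOps.h"
    #include "mlir/Dialect/Affine/LoopUtils.h"
    #include "llvm/ADT/DenseSet.h"
    #include <optional>

    using namespace mlir;
    using namespace mlir::affine;

    // Generate explicit fast-buffer copies (point-wise, not DMA) for the
    // memrefs referenced inside `forOp`.
    static LogicalResult copyIntoFastMemory(AffineForOp forOp) {
      AffineCopyOptions options;
      options.generateDma = false;      // emit affine load/store copy loops
      options.slowMemorySpace = 0;      // source memory space
      options.fastMemorySpace = 1;      // destination (fast) memory space
      options.tagMemorySpace = 0;       // only relevant when generateDma is true
      options.fastMemCapacityBytes = 32 * 1024;

      DenseSet<Operation *> copyNests;  // filled with the generated copy nests
      return affineDataCopyGenerate(forOp, options, /*filterMemRef=*/std::nullopt,
                                    copyNests);
    }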
/llvm-project/mlir/lib/Dialect/Affine/Analysis/

  LoopAnalysis.cpp
     39  getTripCountMapAndOperands(AffineForOp forOp, AffineMap *tripCountMap, SmallVectorImpl<Value> *tripCountOperands)   (in getTripCountMapAndOperands(), argument)
     88  getConstantTripCount(AffineForOp forOp)   (in getConstantTripCount(), argument)
    114  getLargestDivisorOfTripCount(AffineForOp forOp)   (in getLargestDivisorOfTripCount(), argument)
    165  isInvariantAccess(LoadOrStoreOp memOp, AffineForOp forOp)   (in isInvariantAccess(), argument)
    255  auto *forOp = loop.getOperation();   (in isVectorizableLoopBodyWithOpCond(), local)
    357  isOpwiseShiftValid(AffineForOp forOp, ArrayRef<uint64_t> shifts)   (in isOpwiseShiftValid(), argument)
    [all...]

  AffineAnalysis.cpp
     43  static Value getSupportedReduction(AffineForOp forOp, unsigned pos,   (in getSupportedReduction())
     86  AffineForOp forOp, SmallVectorImpl<LoopReduction> &supportedReductions) {   (in getSupportedReductions())
    102  AffineForOp forOp, SmallVectorImpl<LoopReduction> *parallelReductions) {   (in isLoopParallel())
    139  bool mlir::affine::isLoopMemoryParallel(AffineForOp forOp) {   (in isLoopMemoryParallel())
    253  if (AffineForOp forOp = dyn_cast<AffineForOp>(op)) {   (in getIndexSet(), local)
    268  if (AffineForOp forOp = dyn_cast<AffineForOp>(op)) {   (in getIndexSet(), local)
    691  AffineForOp forOp, unsigned maxLoopDepth,   (in getDependenceComponents())
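These analyses are the usual guards in front of the transforms above: a pass checks the trip count and parallelism of an affine.for before rewriting it. A minimal sketch combining getConstantTripCount and isLoopParallel, assuming the declarations in Affine/Analysis/LoopAnalysis.h and Affine/Analysis/AffineAnalysis.h (the trip-count threshold of 8 is arbitrary):

    #include "mlir/Dialect/Affine/Analysis/AffineAnalysis.h"
    #include "mlir/Dialect/Affine/Analysis/LoopAnalysis.h"
    #include "mlir/Dialect/Affine/IR/AffineOps.h"
    #include <optional>

    using namespace mlir;
    using namespace mlir::affine;

    // A loop qualifies for full unrolling here only if its trip count is a
    // small compile-time constant and its iterations are independent
    // (allowing for recognized reductions).
    static bool isSmallParallelLoop(AffineForOp forOp) {
      std::optional<uint64_t> tripCount = getConstantTripCount(forOp);
      if (!tripCount || *tripCount > 8)
        return false;
      SmallVector<LoopReduction> reductions;
      return isLoopParallel(forOp, &reductions);
    }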
/llvm-project/mlir/lib/Dialect/Linalg/Transforms/

  HoistPadding.cpp
     46  if (auto forOp = dyn_cast<scf::ForOp>(op)) {   (in debugPrintLoopInShortForm(), local)
    353  for (scf::ForOp forOp : llvm::reverse(reverseEnclosingLoops))   (in finalizeHoistPaddingAnalysis(), local)
    420  if (auto forOp = dyn_cast<scf::ForOp>(op)) {   (in dropNonIndexDependencies(), local)
    466  for (auto forOp : packingLoops) {   (in getHoistedPackedTensorSizes(), local)
    515  buildLoopIterationCount(RewriterBase &rewriter, scf::ForOp outer, scf::ForOp forOp)   (in buildLoopIterationCount(), argument)
    557  auto forOp = dyn_cast<scf::ForOp>(bbArg.getOwner()->getParentOp());   (in buildPackingLoopNestImpl(), local)
    581  auto forOp = dyn_cast<scf::ForOp>(op);   (in buildPackingLoopNestImpl(), local)
    657  auto forOp = scf::getForInductionVarOwner(iv);   (in buildPackingLoopNestImpl(), local)
    799  padThroughLoopIterArg(RewriterBase &rewriter, Value paddedValueBeforeHoisting, Value hoistedPackedTensor, tensor::ExtractSliceOp outerSliceOp, scf::ForOp forOp)   (in padThroughLoopIterArg(), argument)
    924  scf::ForOp forOp = analysis.padConsumingForOp;   (in replaceByPackingResult(), local)
    [all...]
/llvm-project/mlir/lib/Dialect/SCF/IR/

  ValueBoundsOpInterfaceImpl.cpp
     77  // `value` is result of `forOp`, we can prove that:   (in populateBoundsForIndexValue(), local)
     92  auto forOp = cast<ForOp>(op);   (in populateBoundsForShapedValueDim(), local)
    [all...]
/llvm-project/mlir/lib/Conversion/ArmSMEToSCF/

  ArmSMEToSCF.cpp
    112  auto forOp = rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step,   (in createLoadStoreForOverTileSlices(), local)
    205  auto forOp = createLoadStoreForOverTileSlices(   (in matchAndRewrite(), local)
    302  auto forOp   (in matchAndRewrite(), local)
    [all...]
/llvm-project/mlir/lib/Dialect/SparseTensor/Transforms/

  SparseVectorization.cpp
    237  vectorizeSubscripts(PatternRewriter &rewriter, scf::ForOp forOp, VL vl, ValueRange subs, bool codegen, Value vmask, SmallVectorImpl<Value> &idxs)   (in vectorizeSubscripts(), argument)
    370  vectorizeExpr(PatternRewriter &rewriter, scf::ForOp forOp, VL vl, Value exp, bool codegen, Value vmask, Value &vexp)   (in vectorizeExpr(), argument)
    500  vectorizeStmt(PatternRewriter &rewriter, scf::ForOp forOp, VL vl, bool codegen)   (in vectorizeStmt(), argument)
    644  if (auto forOp = redOp.getVector().getDefiningOp<scf::ForOp>()) {   (in matchAndRewrite(), local)
    [all...]
/llvm-project/mlir/lib/Conversion/ArithToArmSME/

  ArithToArmSME.cpp
     88  auto forOp = mlir::arm_sme::createLoopOverTileSlices(   (in matchAndRewrite(), local)
/llvm-project/mlir/lib/Conversion/VectorToArmSME/

  VectorToArmSME.cpp
    249  auto forOp =   (in matchAndRewrite(), local)
    309  auto forOp =   (in matchAndRewrite(), local)
    652  auto forOp = rewriter.create<scf::ForOp>(loc, lowerBound, upperBound, step);   (in matchAndRewrite(), local)
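The ArmSME-related conversions above all build an scf.for inside a rewrite pattern and then emit the per-tile-slice ops into its body, as at VectorToArmSME.cpp line 652. A minimal sketch of that idiom, assuming the standard scf::ForOp and arith::ConstantIndexOp builders (the helper name and the numSlices operand are illustrative):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/Dialect/SCF/IR/SCF.h"
    #include "mlir/IR/PatternMatch.h"

    using namespace mlir;

    // Create `scf.for %i = 0 to numSlices step 1` and move the insertion point
    // into its body, so ops built afterwards land before the implicit
    // scf.yield terminator.
    static scf::ForOp createLoopOverSlices(PatternRewriter &rewriter,
                                           Location loc, Value numSlices) {
      Value lowerBound = rewriter.create<arith::ConstantIndexOp>(loc, 0);
      Value step = rewriter.create<arith::ConstantIndexOp>(loc, 1);
      auto forOp = rewriter.create<scf::ForOp>(loc, lowerBound, numSlices, step);
      rewriter.setInsertionPointToStart(forOp.getBody());
      return forOp;
    }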
/llvm-project/mlir/test/lib/Dialect/SCF/

  TestLoopUnrolling.cpp
     57  getOperation()->walk([&](scf::ForOp forOp) {   (in runOnOperation())