Lines Matching defs:memref
51 /// which fuses loop nests with single-writer/single-reader memref dependences
54 // memrefs, where each memref can have multiple users (if profitable).
110 // escaping memref, we can only remove it if the fusion slice is maximal so
180 /// A memref escapes in the context of the fusion pass if either:
184 /// (e.g., by call op, memref load/store ops, alias creating ops, unknown ops,
185 /// terminator ops, etc.); such ops do not dereference the memref in an affine
187 static bool isEscapingMemref(Value memref, Block *block) {
188 Operation *defOp = memref.getDefiningOp();
189 // Check if 'memref' is a block argument.
193 // Check if this is defined to be an alias of another memref.
199 if (!hasSingleEffect<mlir::MemoryEffects::Allocate>(defOp, memref))
202 // Check if 'memref' is used by a non-dereferencing op (including unknown ones)
204 return llvm::any_of(memref.getUsers(), [&](Operation *user) {
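Taken together, the fragments above trace the full escape check. A sketch assembling them into one function; the lines the match skipped and the exact user predicate are assumptions, not verbatim source (later sketches assume the same includes and namespace):

    #include "mlir/Dialect/Affine/IR/AffineOps.h"
    #include "mlir/Dialect/MemRef/IR/MemRef.h"
    #include "mlir/Interfaces/SideEffectInterfaces.h"
    #include "mlir/Interfaces/ViewLikeInterface.h"
    #include "llvm/ADT/STLExtras.h"

    using namespace mlir;

    static bool isEscapingMemref(Value memref, Block *block) {
      Operation *defOp = memref.getDefiningOp();
      // Check if 'memref' is a block argument: no defining op means it was
      // passed in from outside, so it escapes by definition.
      if (!defOp)
        return true;

      // Check if this is defined to be an alias of another memref; a view
      // of an escaping memref escapes too (recursion assumed).
      if (auto viewOp = dyn_cast<ViewLikeOpInterface>(defOp))
        if (isEscapingMemref(viewOp.getViewSource(), block))
          return true;

      // Only a pure allocation guarantees alias freedom.
      if (!hasSingleEffect<MemoryEffects::Allocate>(defOp, memref))
        return true;

      // Check if 'memref' is used by a non-dereferencing op (including
      // unknown ones); the affine-interface test here is an assumption.
      return llvm::any_of(memref.getUsers(), [&](Operation *user) {
        return !isa<AffineReadOpInterface, AffineWriteOpInterface>(user) &&
               block->findAncestorOpInBlock(*user);
      });
    }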
221 auto memref = cast<AffineWriteOpInterface>(storeOp).getMemRef();
222 if (escapingMemRefs.count(memref))
224 if (isEscapingMemref(memref, &mdg->block))
225 escapingMemRefs.insert(memref);
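Lines 221-225 sit inside a loop over a node's stores; a plausible wrapper, with the node lookup and iteration assumed:

    static void gatherEscapingMemrefs(unsigned id, MemRefDependenceGraph *mdg,
                                      DenseSet<Value> &escapingMemRefs) {
      auto *node = mdg->getNode(id);
      for (Operation *storeOp : node->stores) {
        auto memref = cast<AffineWriteOpInterface>(storeOp).getMemRef();
        if (escapingMemRefs.count(memref))
          continue;
        if (isEscapingMemref(memref, &mdg->block))
          escapingMemRefs.insert(memref);
      }
    }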
240 // Creates and returns a private (single-user) memref for fused loop rooted
241 // at 'forOp', with (potentially reduced) memref size based on the
255 // Create new memref type based on slice bounds.
264 assert(validRegion && "unexpected memref region failure");
312 // Create new private memref for fused loop 'forOp'. 'newShape' is always
318 Value newMemRef = top.create<memref::AllocOp>(forOp.getLoc(), newMemRefType);
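Between lines 240 and 318, createPrivateMemRef computes the store's region, derives a reduced shape, and allocates at the top of the block. A condensed sketch; the region and shape helper calls are assumptions about the elided lines:

    // Compute the memref region written by 'srcStoreOp' at 'dstLoopDepth'.
    Value oldMemRef = cast<AffineWriteOpInterface>(srcStoreOp).getMemRef();
    MemRefRegion region(srcStoreOp->getLoc());
    bool validRegion = succeeded(region.compute(srcStoreOp, dstLoopDepth));
    assert(validRegion && "unexpected memref region failure");

    // Create new memref type based on slice bounds (assumed helper call).
    SmallVector<int64_t, 4> newShape;
    std::optional<int64_t> numElements =
        region.getConstantBoundingSizeAndShape(&newShape);
    assert(numElements && "expected constant-sized private buffer");
    auto oldMemRefType = cast<MemRefType>(oldMemRef.getType());
    auto newMemRefType =
        MemRefType::get(newShape, oldMemRefType.getElementType());

    // Create new private memref for fused loop 'forOp'. 'newShape' is
    // always statically shaped here, so a plain alloc suffices.
    OpBuilder top(forOp->getBlock(), forOp->getBlock()->begin());
    Value newMemRef =
        top.create<memref::AllocOp>(forOp.getLoc(), newMemRefType);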
346 /// Returns true if there are any non-affine uses of `memref` in any of
348 /// than affine read/write are treated as non-affine uses of `memref`.
350 Value memref) {
354 // Check if there is a non-affine memref user in any op between `start` and
356 return llvm::any_of(memref.getUsers(), [&](Operation *user) {
365 /// Check whether a memref value used in any operation of 'src' has a
368 /// Any other than affine read/write are treated as non-affine uses of memref.
376 // Collect relevant memref values.
380 // Collect memref values only.
386 return llvm::any_of(memRefValues, [&](Value memref) {
387 return hasNonAffineUsersOnPath(src, end, memref);
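The two overloads at lines 346-387 reduce to a per-memref scan plus a wrapper that collects memref operands. A sketch under those fragments; the ancestor and ordering logic between the matched lines is assumed:

    static bool hasNonAffineUsersOnPath(Operation *start, Operation *end,
                                        Value memref) {
      assert(start->getBlock() == end->getBlock() && "expected same block");
      // Check if there is a non-affine memref user in any op between
      // `start` and `end`.
      return llvm::any_of(memref.getUsers(), [&](Operation *user) {
        Operation *ancestor = start->getBlock()->findAncestorOpInBlock(*user);
        return ancestor && !ancestor->isBeforeInBlock(start) &&
               ancestor->isBeforeInBlock(end) &&
               !isa<AffineReadOpInterface, AffineWriteOpInterface>(user);
      });
    }

    static bool hasNonAffineUsersOnPath(Operation *src, Operation *end) {
      // Collect relevant memref values: operands of memref type under 'src'.
      SmallVector<Value, 4> memRefValues;
      src->walk([&](Operation *op) {
        for (Value v : op->getOperands())
          // Collect memref values only.
          if (isa<MemRefType>(v.getType()))
            memRefValues.push_back(v);
      });
      return llvm::any_of(memRefValues, [&](Value memref) {
        return hasNonAffineUsersOnPath(src, end, memref);
      });
    }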
394 // the memref being produced and consumed, which is an input to the cost model.
398 // same memref as dst loop nest load ops, and 'srcStoreOpInst' will be the
540 // each dimension, so that we are sure they are covering the same memref
653 // input-reuse relationship on a memref, with the goal of improving locality.
664 // memref.
680 // loads from the same memref, but which has no dependence paths to/from.
685 // This function also checks that the memref write region of 'sibLoopNest',
761 /// Returns true if a private memref can be created for `memref` given
763 bool canCreatePrivateMemRef(Value memref,
768 // If `memref` is an escaping one, do not create a private memref
770 // memref unmodified as all the writes originally meant for the
771 // escaping memref would be performed on the private memref:
774 // 2. The destination writes to `memref`.
775 if (srcEscapingMemRefs.count(memref) > 0 &&
776 (removeSrcNode || consumerNode->getStoreOpCount(memref) > 0))
779 // Don't create a private memref if 'srcNode' has in edges on
780 // 'memref' or 'dstNode' has out edges on 'memref'.
781 if (mdg->getIncomingMemRefAccesses(producerId, memref) > 0 ||
782 mdg->getOutEdgeCount(consumerId, memref) > 0)
785 // If 'srcNode' will be removed but it has out edges on 'memref' to
787 // cannot create a private memref.
790 return edge.value == memref && edge.id != consumerId;
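Lines 761-790 supply all three bail-out conditions for privatization. Stitched into one predicate; 'mdg' and 'consumerNode' (i.e. mdg->getNode(consumerId)) are assumed in scope, and the final return is an assumption:

    bool canCreatePrivateMemRef(Value memref,
                                const DenseSet<Value> &srcEscapingMemRefs,
                                unsigned producerId, unsigned consumerId,
                                bool removeSrcNode) {
      // If `memref` escapes, privatizing is only safe when the original
      // buffer still receives every write meant for it: bail out if the
      // source node goes away or the destination also writes to `memref`.
      if (srcEscapingMemRefs.count(memref) > 0 &&
          (removeSrcNode || consumerNode->getStoreOpCount(memref) > 0))
        return false;
      // Don't create a private memref if 'srcNode' has in edges on
      // 'memref' or 'dstNode' has out edges on 'memref'.
      if (mdg->getIncomingMemRefAccesses(producerId, memref) > 0 ||
          mdg->getOutEdgeCount(consumerId, memref) > 0)
        return false;
      // If 'srcNode' will be removed but it has out edges on 'memref' to
      // nodes other than the consumer, those readers would be orphaned.
      if (removeSrcNode &&
          llvm::any_of(mdg->outEdges[producerId], [&](const auto &edge) {
            return edge.value == memref && edge.id != consumerId;
          }))
        return false;
      return true;
    }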
853 // Skip if 'srcNode' out edge count on any memref is greater than
855 if (any_of(producerConsumerMemrefs, [&](Value memref) {
856 return mdg->getOutEdgeCount(srcNode->id, memref) >
862 // block (e.g., memref block arguments, returned memrefs,
975 for (Value memref : producerConsumerMemrefs) {
976 if (canCreatePrivateMemRef(memref, srcEscapingMemRefs, srcId, dstId,
978 // Create a private version of this memref.
980 << "Creating private memref for " << memref << '\n');
982 privateMemrefs.insert(memref);
1018 // TODO: Use union of memref write regions to compute
1019 // private memref footprint.
1036 // Collect dst loop stats after memref privatization transformation.
1101 Value memref = idAndMemref.second;
1103 // stores to the same memref in 'sibNode' loop nest.
1117 // Get unique 'sibNode' load op to 'memref'.
1119 sibNode->getLoadOpsForMemref(memref, &sibLoadOpInsts);
1124 // Gather 'dstNode' load ops to 'memref'.
1126 dstNode->getLoadOpsForMemref(memref, &dstLoadOpInsts);
1146 FusionStrategy strategy(memref);
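Lines 1101-1146 drive sibling fusion for a chosen (node, memref) pair; a condensed sketch, with the fusion and profitability calls elided:

    Value memref = idAndMemref.second;
    // Get unique 'sibNode' load op to 'memref'.
    SmallVector<Operation *, 2> sibLoadOpInsts;
    sibNode->getLoadOpsForMemref(memref, &sibLoadOpInsts);
    Operation *sibLoadOpInst = sibLoadOpInsts.front();
    // Gather 'dstNode' load ops to 'memref'.
    SmallVector<Operation *, 2> dstLoadOpInsts;
    dstNode->getLoadOpsForMemref(memref, &dstLoadOpInsts);
    // Sibling fusion is keyed on the shared memref.
    FusionStrategy strategy(memref);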
1200 // but which loads from the same memref. Returns true and sets
1206 // on 'memref'.
1207 auto canFuseWithSibNode = [&](Node *sibNode, Value memref) {
1210 if (sibNode->getLoadOpCount(memref) != 1)
1217 // Skip sib node if it loads from (and stores to) the same memref on
1221 if (llvm::any_of(loadAndStoreMemrefSet, [=](Value memref) {
1222 return mdg->getIncomingMemRefAccesses(sibNode->id, memref) > 0;
1226 // Check that all stores are to the same memref, if any.
1235 // Skip if a memref value in one node is used by a non-affine memref
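The lambda at lines 1207-1235 checks sibling-fusion legality along 'memref'. A sketch of its shape; the trailing same-store-memref and non-affine-user checks are summarized in a comment rather than reconstructed:

    auto canFuseWithSibNode = [&](Node *sibNode, Value memref) {
      // Fusion along 'memref' is keyed on a single sibling load of it.
      if (sibNode->getLoadOpCount(memref) != 1)
        return false;
      // Skip sib node if it loads from (and stores to) the same memref on
      // which it also has an input dependence edge.
      DenseSet<Value> loadAndStoreMemrefSet;
      sibNode->getLoadAndStoreMemrefSet(&loadAndStoreMemrefSet);
      if (llvm::any_of(loadAndStoreMemrefSet, [=](Value m) {
            return mdg->getIncomingMemRefAccesses(sibNode->id, m) > 0;
          }))
        return false;
      // Remaining checks (elided in the match): all stores, if any, go to
      // one memref, and no memref is used by a non-affine user on the
      // path between the two nodes.
      return true;
    };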
1243 // Search for siblings which load the same memref block argument.
1270 // Skip 'use' if it does not load from the same memref as 'dstNode'.
1271 auto memref = loadOp.getMemRef();
1272 if (dstNode->getLoadOpCount(memref) == 0)
1274 // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
1275 if (canFuseWithSibNode(sibNode, memref)) {
1278 idAndMemrefToFuse->second = memref;
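Lines 1243-1278 search block-argument memrefs for sibling candidates. A sketch of that walk; the sibling-node lookup helper is hypothetical, and the ->first assignment is assumed:

    for (BlockArgument arg : block->getArguments()) {
      for (Operation *user : arg.getUsers()) {
        auto loadOp = dyn_cast<AffineReadOpInterface>(user);
        if (!loadOp)
          continue;
        Node *sibNode = getSiblingNodeFor(user); // hypothetical lookup
        if (!sibNode)
          continue;
        // Skip 'use' if it does not load from the same memref as 'dstNode'.
        Value memref = loadOp.getMemRef();
        if (dstNode->getLoadOpCount(memref) == 0)
          continue;
        // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
        if (canFuseWithSibNode(sibNode, memref)) {
          idAndMemrefToFuse->first = sibNode->id;
          idAndMemrefToFuse->second = memref;
          return true;
        }
      }
    }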
1305 // Skip output edge if not a sibling using the same memref.
1311 // Check if 'sibNode/dstNode' can be input-reuse fused on 'memref'.
1335 // Collect dst loop stats after memref privatization transformation.
1344 // edges, and it does not write to a memref which escapes the block.
1357 auto memref = pair.first;
1359 if (!memref.use_empty())
1362 auto *op = memref.getDefiningOp();
1363 if (isa_and_nonnull<memref::AllocOp>(op))
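The final fragments (lines 1357-1363) belong to the post-fusion cleanup: allocs whose memrefs end up unused are erased. A sketch, with 'memrefEdgeCount' as the iterated map being an assumption inferred from 'pair.first':

    // Erase allocs left dead by fusion: no remaining dependence edges and
    // no remaining uses, and the memref came from a plain memref.alloc.
    for (auto &pair : mdg->memrefEdgeCount) {
      if (pair.second > 0)
        continue;
      auto memref = pair.first;
      // Skip if there exist other uses (e.g., a return or a call).
      if (!memref.use_empty())
        continue;
      auto *op = memref.getDefiningOp();
      if (isa_and_nonnull<memref::AllocOp>(op))
        op->erase();
    }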