Lines Matching defs:tensor
9 // This file implements the conversion of sparse tensor types to actual sparse code.
106 /// based codegen. It finds the dependent index set for all tensor levels in the
110 /// merger between (tensor, level) pairs and their dependent index variable set:
119 static bool findDepIdxSet(Merger &merger, TensorId tensor, Level lvl,
129 if (!isUndefLT(merger.getLvlType(tensor, idx)))
137 merger.setLevelAndType(tensor, idx, lvl, lt);
142 // same tensor. We cannot handle this case, e.g., A[i+j][i+k], `i` is
144 if (merger.hasDependentLvl(idx, tensor)) {
146 // appears in the affine index of a different tensor, or by taking slices on
147 // multiple dimensions when it is on the same tensor.
156 merger.setLoopDependentTensorLevel(idx, tensor, lvl, lt, coefficient);
178 return findDepIdxSet(merger, tensor, lvl, rhs, lt, isSubExp, coefficient);
182 return findDepIdxSet(merger, tensor, lvl, binOp.getLHS(), lt, true) &&
183 findDepIdxSet(merger, tensor, lvl, binOp.getRHS(), lt, true);
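The findDepIdxSet fragments above recurse over an affine index expression (the `i+j` in A[i+j][i+k]) and record, for each (tensor, level) pair, the set of loop indices that level depends on. A minimal stand-alone sketch of that recursion, using a hypothetical AffExpr type rather than MLIR's actual AffineExpr:

    #include <set>

    // Hypothetical stand-in for an affine index expression: either a single
    // loop dimension (d0, d1, ...) or a binary node such as d0 + d1.
    struct AffExpr {
      enum Kind { Dim, Add } kind;
      unsigned dim = 0;                             // valid when kind == Dim
      const AffExpr *lhs = nullptr, *rhs = nullptr; // valid when kind == Add

      // Recursively collect every loop index the expression depends on,
      // mirroring the recursion over getLHS()/getRHS() on lines 182-183.
      void collectDeps(std::set<unsigned> &deps) const {
        if (kind == Dim) {
          deps.insert(dim);
          return;
        }
        lhs->collectDeps(deps);
        rhs->collectDeps(deps);
      }
    };

For A[i+j][i+k], level 0 collects {i, j} and level 1 collects {i, k}; because `i` would then carry dependent levels on the same tensor twice, the hasDependentLvl check on line 144 bails out.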
191 /// `getMatchingIndexingMap` for the given tensor. For the following inputs:
198 Value tensor) {
199 // The `tensor` is not guaranteed to have `RankedTensorType`, therefore
203 const auto rtp = dyn_cast<RankedTensorType>(tensor.getType());
239 /// Helper method to inspect sparse encodings in the tensor types.
265 // If the current tensor being inspected requires an affine index, it needs
298 /// Generates buffer for the output tensor.
306 Value tensor) -> Value {
307 // Must not be a sparse tensor.
308 assert(!getSparseTensorEncoding(tensor.getType()));
309 // Two output tensor references should point to the same object.
311 assert(lhs->get() == tensor);
312 // An output tensor can simply materialize from the buffer of the tensor
324 getElementTypeOrSelf(tensor.getType()));
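Lines 298-324 materialize the buffer behind the (dense) output tensor so the generated loops can store into it. A rough plain-C++ sketch of the idea, with a hypothetical genOutputBuffer and an assumed `isZeroInit` flag standing in for the pass's actual materialization checks:

    #include <vector>

    // Sketch only: a dense output buffer either starts out zero-filled
    // (output materializes from an empty tensor) or must carry over the
    // incoming tensor's contents so updates compose correctly.
    std::vector<double> genOutputBuffer(const std::vector<double> &init,
                                        bool isZeroInit) {
      if (isZeroInit)
        return std::vector<double>(init.size(), 0.0); // fresh storage
      return init;                                    // copy initial values
    }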
336 /// Generates index for load/store on sparse tensor.
348 /// Generates subscript for load/store on a dense or sparse tensor.
360 // Simply returns the tensor to extract value using iterators.
376 /// Generates insertion code to implement dynamic tensor load.
381 // Direct lexicographic coordinate order, tensor loads as zero.
391 /// Generates insertion code to implement dynamic tensor load for reduction.
397 // Direct lexicographic coordinate order, tensor loads as identity.
415 Value res = builder.create<tensor::InsertOp>(loc, v, sparseOut, ivs);
425 /// Generates insertion code to implement dynamic tensor store.
456 sparseOut = builder.create<tensor::InsertOp>(loc, rhs, chain, ivs);
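The tensor::InsertOp calls on lines 415 and 456 thread the output through an SSA chain: each dynamic store consumes the previous tensor value (`chain`) and yields a new one (`sparseOut`). A value-semantics C++ analogy, modeling the tensor as a hypothetical COO list:

    #include <cstdint>
    #include <utility>
    #include <vector>

    // Illustrative COO model: each insertion takes the current "tensor"
    // by value and returns the next link in the chain, just as every
    // tensor::InsertOp result feeds the next insertion.
    using Coords = std::vector<uint64_t>;
    using CooTensor = std::vector<std::pair<Coords, double>>;

    CooTensor insertValue(CooTensor chain, Coords ivs, double rhs) {
      chain.emplace_back(std::move(ivs), rhs); // lexicographic order assumed
      return chain;
    }

A store then looks like `chain = insertValue(std::move(chain), {i, j}, v);`, mirroring `sparseOut = builder.create<tensor::InsertOp>(loc, rhs, chain, ivs);`.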
498 /// Generates a load on a dense or sparse tensor.
504 // Get tensor operand.
507 OpOperand *t = &op->getOpOperand(env.exp(exp).tensor);
508 // Fold binary-valued tensor into explicit value.
531 /// Generates a store on a dense or sparse tensor.
598 // into dense tensor loads. Note that we should not encounter
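Line 508 folds a load from a binary-valued sparse tensor into its explicit value, so no memory access is emitted for stored entries. A sketch under the assumption that such a tensor carries one shared value for all stored entries (the `explicitVal` field is illustrative, not the real encoding API):

    #include <cstddef>
    #include <optional>
    #include <vector>

    struct SparseVec {
      std::optional<double> explicitVal; // set when all stored values agree
      std::vector<double> values;        // otherwise, per-entry values
    };

    // Loading a stored entry folds to a constant when the tensor is
    // binary valued; only the general case touches memory.
    double genTensorLoad(const SparseVec &t, size_t pos) {
      if (t.explicitVal)
        return *t.explicitVal; // folded: no load emitted
      return t.values[pos];
    }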
627 /// Recursively generates tensor expression.
686 /// Hoists loop invariant tensor loads for which indices have been exhausted.
692 // Inspect tensor indices.
694 OpOperand &t = op->getOpOperand(env.exp(exp).tensor);
732 // Start or end loop invariant hoisting of a tensor load.
743 // tensor loads, since subsequent MLIR/LLVM passes know how to
766 // scheme, we can use the original tensor as incoming SSA value (which
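Lines 686-766 hoist a tensor load out of the remaining loops once all of its indices have been exhausted by outer loops, so one SSA value replaces repeated loads. A plain C++ analogy of the transformation (not the pass itself):

    #include <cstddef>

    // s[i] depends only on the outer index i, so it is loaded once at
    // loop depth 1 instead of once per inner iteration.
    void scaleRow(double *out, const double *a, const double *s,
                  size_t i, size_t n) {
      double si = s[i]; // hoisted loop-invariant load
      for (size_t j = 0; j < n; ++j)
        out[i * n + j] = a[i * n + j] * si;
    }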
769 Value tensor = lhs->get();
773 Type etp = cast<ShapedType>(tensor.getType()).getElementType();
778 auto r = builder.create<ExpandOp>(loc, TypeRange({t1, t2, t3, t4}), tensor);
805 // Parallel loops on tensor expansion can cause data races.
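The ExpandOp on line 778 sets up access-pattern expansion for the sparse output: a dense scratch array of values plus bookkeeping (its four results t1-t4) so that scattered updates cost O(1) and only the touched entries are compressed back afterwards. A sketch of that workspace discipline in plain C++ (field names illustrative); the shared `filled`/`count` state is also why line 805 rules out parallel loops on an expansion:

    #include <cstdint>
    #include <vector>

    struct Workspace {
      std::vector<double> values;  // dense scratch values
      std::vector<bool> filled;    // which coordinates currently hold a value
      std::vector<uint64_t> added; // compact list of touched coordinates
      uint64_t count = 0;          // number of valid entries in `added`
    };

    // Scatter one update into the workspace; compression later visits
    // only `count` coordinates instead of the whole dense extent.
    void scatterAdd(Workspace &w, uint64_t j, double v) {
      if (!w.filled[j]) {
        w.filled[j] = true;
        w.added[w.count++] = j; // first touch: remember the coordinate
      }
      w.values[j] += v;
    }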
831 // Queries the LT based on the tensor and loop id, as requested by
840 /// Emit a loop to coiterate over the list of tensor levels. The generated loop
940 // expression), we need to reconstruct the tensor level types if this
1025 // generate a dense loop according to the synthetic tensor (for
1026 // invariants and sparse output tensor).
1033 // The level of the synthetic tensor is the current loop depth;
1034 // the rank of the synthetic tensor equals the number of loops.
1038 // Skips invalid lvl (e.g., when this is a zero-ranked tensor).
1090 // Note that we generate dense indices of the output tensor unconditionally,
1102 // synthetic tensor.
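Lines 840 onward emit a loop that coiterates over a list of tensor levels, advancing each sparse level only while its coordinate lags. The scalar shape of that while-loop for two sparse vectors, in plain C++ (the generated IR is structured differently; this only shows the traversal pattern):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Coiterate two sorted coordinate lists; work happens only where a
    // coordinate is present in both operands (a conjunction like a * b).
    double dotSparse(const std::vector<uint64_t> &ca,
                     const std::vector<double> &va,
                     const std::vector<uint64_t> &cb,
                     const std::vector<double> &vb) {
      double sum = 0.0;
      size_t ia = 0, ib = 0;
      while (ia < ca.size() && ib < cb.size()) {
        if (ca[ia] == cb[ib])
          sum += va[ia++] * vb[ib++]; // both present: multiply and advance
        else if (ca[ia] < cb[ib])
          ++ia;                       // advance the lagging side
        else
          ++ib;
      }
      return sum;
    }

The synthetic tensor mentioned on lines 1025-1034 joins such loops as an always-dense operand, which is how invariants and the sparse output still receive a loop to live in.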
1122 // Emit access pattern expansion for sparse tensor output.
1129 // TODO: remove this! The same tensor level might be added multiple
1130 // times due to the special handling of the all-dense "sparse" output tensor
1152 // TODO: Handle affine expression on output tensor.
1264 // Finalize access pattern expansion for sparse tensor output.
1275 // At each leaf, assign remaining tensor (sub)expression to output tensor.
1367 Value tensor = lhs->get();
1368 Type resType = tensor.getType();
1370 // The sparse tensor rematerializes from the original sparse tensor's
1372 // tensor materializes from the chain with 'hasInserts' enabled.
1376 tensor = chain;
1378 rewriter.replaceOpWithNewOp<LoadOp>(op, resType, tensor, hasInserts);
1380 // To rematerialize a non-annotated tensor, simply load it
1401 // Only accept single-output operations with pure tensor semantics.
1426 // TODO: Constant indices are currently not supported on sparse tensors, but
1427 // are allowed on non-annotated dense tensors. Supporting them would also be
1428 // required for sparse tensor slice rank reduction.
1460 // Constructs the tensor expression tree from `op`; returns failure if the
1461 // tree cannot be built or the tensor expression is inadmissible.
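Lines 1460-1461 build the tensor expression tree whose admissibility gates the whole rewrite. A toy rendition of such a tree; the kinds and fields are illustrative, not the Merger's actual TensorExp layout:

    #include <memory>

    // Leaves reference tensor operands or invariants; interior nodes are
    // the arithmetic ops. The sparsifier walks this tree to decide whether
    // the expression is admissible (e.g. whether a sparse output can be
    // written strictly in lexicographic order).
    struct TensorExp {
      enum class Kind { Tensor, Invariant, MulF, AddF } kind;
      unsigned tensor = 0;                 // operand id for Kind::Tensor
      std::unique_ptr<TensorExp> lhs, rhs; // children for binary kinds
    };

For out(i) = a(i) * b(i) the tree is MulF(Tensor 0, Tensor 1), with the output kept as a separate operand.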