Lines Matching defs:tensor

28 using namespace mlir::tensor;
31 namespace tensor {
36 tensor::CastOp> {
55 auto castOp = cast<tensor::CastOp>(op);
63 // type in case the input is an unranked tensor type.
65 // Case 1: Casting an unranked tensor
67 // When casting to a ranked tensor, we cannot infer any static offset or
72 // Case 2: Casting to an unranked tensor type
77 // Case 3: Ranked tensor -> ranked tensor. The offsets and strides do not
87 auto castOp = cast<tensor::CastOp>(op);
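
As an illustration of the ranked-to-ranked case (case 3), a hedged before/after sketch; %t, %src, and the function name are made up for the example, and the actual rewrite also carries the source layout (offset/strides) over to the result type:

    // Tensor form (input to bufferization):
    //   %0 = tensor.cast %t : tensor<?xf32> to tensor<4xf32>
    func.func @cast_example(%src: memref<?xf32>) -> memref<4xf32> {
      // Bufferized sketch: the cast only refines the static sizes.
      %0 = memref.cast %src : memref<?xf32> to memref<4xf32>
      return %0 : memref<4xf32>
    }
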
117 /// Bufferization of tensor.collapse_shape. Replace with memref.collapse_shape.
120 tensor::CollapseShapeOp> {
123 // tensor.collapse_shape may reallocate, at which point the source buffer is
144 auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
166 auto collapseShapeOp = cast<tensor::CollapseShapeOp>(op);
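
A hedged sketch of the common case where the source buffer is contiguous, so no reallocation is needed; names are illustrative:

    // Tensor form:
    //   %0 = tensor.collapse_shape %t [[0, 1]] : tensor<2x4xf32> into tensor<8xf32>
    func.func @collapse_example(%src: memref<2x4xf32>) -> memref<8xf32> {
      // Bufferized sketch; with a non-contiguous source the implementation
      // may instead allocate a new buffer and copy before collapsing.
      %0 = memref.collapse_shape %src [[0, 1]] : memref<2x4xf32> into memref<8xf32>
      return %0 : memref<8xf32>
    }
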
229 /// Bufferization of tensor.dim. Replace with memref.dim.
232 tensor::DimOp> {
235 // The op reads the tensor's metadata but not its contents.
251 auto dimOp = cast<tensor::DimOp>(op);
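
A minimal sketch of the rewrite, assuming the source tensor was bufferized to %src (illustrative names):

    // Tensor form:
    //   %d = tensor.dim %t, %c0 : tensor<?x8xf32>
    func.func @dim_example(%src: memref<?x8xf32>) -> index {
      %c0 = arith.constant 0 : index
      // Bufferized sketch: only the buffer's metadata is read.
      %d = memref.dim %src, %c0 : memref<?x8xf32>
      return %d : index
    }
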
261 /// Bufferization of "tensor.empty". Replace with "bufferization.alloc_tensor".
264 tensor::EmptyOp> {
269 // The returned tensor does not have specified contents.
275 auto emptyOp = cast<tensor::EmptyOp>(op);
283 // Allocate a tensor. This emits a "bufferization.alloc_tensor" op.
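
A hedged sketch of the rewrite; %sz and the function name are hypothetical:

    // Input:
    //   %0 = tensor.empty(%sz) : tensor<?x32xf32>
    func.func @empty_example(%sz: index) -> tensor<?x32xf32> {
      // Rewritten sketch: the allocation's contents stay unspecified.
      %0 = bufferization.alloc_tensor(%sz) : tensor<?x32xf32>
      return %0 : tensor<?x32xf32>
    }
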
293 /// Bufferization of tensor.expand_shape. Replace with memref.expand_shape.
296 tensor::ExpandShapeOp> {
299 // In contrast to tensor.collapse_shape, this op can always be bufferized
317 auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
333 auto expandShapeOp = cast<tensor::ExpandShapeOp>(op);
343 // memref.expand_shape op, use the output_shape argument of tensor.expand_shape
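
A hedged sketch with static shapes; the printed assembly of expand_shape (in particular output_shape) differs across MLIR versions, and the names are illustrative:

    // Tensor form:
    //   %0 = tensor.expand_shape %t [[0, 1]] output_shape [2, 4]
    //       : tensor<8xf32> into tensor<2x4xf32>
    func.func @expand_example(%src: memref<8xf32>) -> memref<2x4xf32> {
      %0 = memref.expand_shape %src [[0, 1]] output_shape [2, 4]
          : memref<8xf32> into memref<2x4xf32>
      return %0 : memref<2x4xf32>
    }
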
352 /// Bufferization of tensor.extract_slice. Replace with memref.subview.
355 tensor::ExtractSliceOp> {
373 auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
401 auto extractSliceOp = cast<tensor::ExtractSliceOp>(op);
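
A minimal sketch: the slice becomes a view into the same buffer, so no data is copied; the names and the strided layout shown are illustrative:

    // Tensor form:
    //   %0 = tensor.extract_slice %t[4] [8] [1] : tensor<16xf32> to tensor<8xf32>
    func.func @extract_slice_example(%src: memref<16xf32>)
        -> memref<8xf32, strided<[1], offset: 4>> {
      %0 = memref.subview %src[4] [8] [1]
          : memref<16xf32> to memref<8xf32, strided<[1], offset: 4>>
      return %0 : memref<8xf32, strided<[1], offset: 4>>
    }
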
417 /// Bufferization of tensor.extract. Replace with memref.load.
420 tensor::ExtractOp> {
438 auto extractOp = cast<tensor::ExtractOp>(op);
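
A minimal sketch with illustrative names:

    // Tensor form:
    //   %v = tensor.extract %t[%i, %j] : tensor<4x4xf32>
    func.func @extract_example(%src: memref<4x4xf32>, %i: index, %j: index) -> f32 {
      %v = memref.load %src[%i, %j] : memref<4x4xf32>
      return %v : f32
    }
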
471 /// Bufferization of tensor.from_elements.
474 tensor::FromElementsOp> {
480 auto fromElementsOp = cast<tensor::FromElementsOp>(op);
499 // Case: tensor<0xelem_type>.
505 // Case: tensor<elem_type>.
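
One plausible lowering for the general ranked case, not necessarily the exact output of this implementation: an allocation plus one store per element (names illustrative):

    // Tensor form:
    //   %0 = tensor.from_elements %a, %b : tensor<2xf32>
    func.func @from_elements_example(%a: f32, %b: f32) -> memref<2xf32> {
      %buf = memref.alloc() : memref<2xf32>
      %c0 = arith.constant 0 : index
      %c1 = arith.constant 1 : index
      memref.store %a, %buf[%c0] : memref<2xf32>
      memref.store %b, %buf[%c1] : memref<2xf32>
      return %buf : memref<2xf32>
    }
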
532 /// Lower the body of a tensor.generate-like op (one index-typed bbArg per dim).
533 /// Such ops are lowered to linalg.map with the given tensor as a destination.
537 /// %r = tensor.generate %x, %y {
540 /// tensor.yield %0 : index
541 /// } : tensor<?x?xindex>
577 auto yieldOp = cast<tensor::YieldOp>(linalgBody.getTerminator());
583 /// Bufferization of tensor.generate.
586 tensor::GenerateOp> {
592 auto generateOp = cast<tensor::GenerateOp>(op);
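
A hedged sketch of what the tensor.generate example above may lower to: a destination tensor plus a zero-input linalg.map whose body recovers the indices with linalg.index. The allocation, the body computation, and the names are illustrative, and the exact printed form can differ:

    func.func @generate_example(%x: index, %y: index) -> tensor<?x?xindex> {
      %dest = bufferization.alloc_tensor(%x, %y) : tensor<?x?xindex>
      %r = linalg.map outs(%dest : tensor<?x?xindex>)
          () {
            %i = linalg.index 0 : index
            %j = linalg.index 1 : index
            %0 = arith.addi %i, %j : index
            linalg.yield %0 : index
          }
      return %r : tensor<?x?xindex>
    }
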
617 /// Bufferization of tensor.insert. Replace with memref.store.
623 tensor::InsertOp> {
626 auto insertOp = cast<tensor::InsertOp>(op);
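
A minimal sketch, assuming the destination tensor was bufferized to %dest (illustrative names):

    // Tensor form:
    //   %0 = tensor.insert %v into %t[%i] : tensor<8xf32>
    func.func @insert_example(%dest: memref<8xf32>, %v: f32, %i: index) {
      // Bufferized sketch: a store into the buffer chosen for the result.
      memref.store %v, %dest[%i] : memref<8xf32>
      return
    }
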
649 // tensor.insert_slice %a into %t[0][10][1] : ... into tensor<10xf32>
660 /// Bufferization of tensor.insert_slice. Replace with a memory copy. Under
667 tensor::InsertSliceOp> {
670 return insertSliceOpRequiresRead(cast<tensor::InsertSliceOp>(op),
678 // whole tensor on every single iteration and is a symptom of a
681 auto insertSliceOp = cast<tensor::InsertSliceOp>(op);
703 // Copy tensor. If this tensor.insert_slice has a matching
704 // tensor.extract_slice, the copy operation will eventually fold away.
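
A hedged sketch of the memory-copy lowering; %src, %dest, and the layout are illustrative. As the comment above notes, when the insert_slice pairs up with a matching extract_slice, the copy can later fold away:

    // Tensor form:
    //   %0 = tensor.insert_slice %a into %t[0] [10] [1]
    //       : tensor<10xf32> into tensor<20xf32>
    func.func @insert_slice_example(%src: memref<10xf32>, %dest: memref<20xf32>) {
      %view = memref.subview %dest[0] [10] [1]
          : memref<20xf32> to memref<10xf32, strided<[1]>>
      memref.copy %src, %view : memref<10xf32> to memref<10xf32, strided<[1]>>
      return
    }
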
717 /// Bufferization of tensor.pad. Replace with bufferization.alloc_tensor +
723 tensor::PadOp> {
744 // Infer memory space from the source tensor.
745 auto padOp = cast<tensor::PadOp>(op);
758 auto padOp = cast<tensor::PadOp>(op);
778 Value srcDim = rewriter.create<tensor::DimOp>(loc, padOp.getSource(), i);
796 // tensor::PadOp is like tensor::GenerateOp: The only difference is that
797 // only a part of the generated tensor is needed. For simplicity, we reuse
802 // Create tensor::InsertSliceOp.
807 rewriter.replaceOpWithNewOp<tensor::InsertSliceOp>(
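
A hedged sketch of the rewrite described above: allocate the padded tensor, fill it from the pad body via a zero-input linalg.map (the same mechanism as for tensor.generate), then insert the original source. All names, shapes, and the body are illustrative:

    // Tensor form:
    //   %0 = tensor.pad %t low[2] high[2] { ... yields %pad_value ... }
    //       : tensor<4xf32> to tensor<8xf32>
    func.func @pad_example(%t: tensor<4xf32>, %pad_value: f32) -> tensor<8xf32> {
      %alloc = bufferization.alloc_tensor() : tensor<8xf32>
      %filled = linalg.map outs(%alloc : tensor<8xf32>)
          () {
            linalg.yield %pad_value : f32
          }
      %0 = tensor.insert_slice %t into %filled[2] [4] [1]
          : tensor<4xf32> into tensor<8xf32>
      return %0 : tensor<8xf32>
    }
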
815 /// Bufferization of tensor.rank. Replace with memref.rank.
818 tensor::RankOp> {
821 // The op reads the tensor's metadata but not its contents.
837 auto rankOp = cast<tensor::RankOp>(op);
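
A minimal sketch with an unranked source, where the rank is only known at runtime (names illustrative):

    // Tensor form:
    //   %r = tensor.rank %t : tensor<*xf32>
    func.func @rank_example(%src: memref<*xf32>) -> index {
      %r = memref.rank %src : memref<*xf32>
      return %r : index
    }
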
847 /// Bufferization of tensor.reshape. Replace with memref.reshape.
850 tensor::ReshapeOp> {
854 auto reshapeOp = cast<tensor::ReshapeOp>(op);
870 auto reshapeOp = cast<tensor::ReshapeOp>(op);
908 auto reshapeOp = cast<tensor::ReshapeOp>(op);
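
A hedged sketch; the shape operand is bufferized as well, and all names are illustrative:

    // Tensor form:
    //   %0 = tensor.reshape %t(%s) : (tensor<8xf32>, tensor<2xindex>) -> tensor<2x4xf32>
    func.func @reshape_example(%src: memref<8xf32>, %shape: memref<2xindex>)
        -> memref<2x4xf32> {
      %0 = memref.reshape %src(%shape)
          : (memref<8xf32>, memref<2xindex>) -> memref<2x4xf32>
      return %0 : memref<2x4xf32>
    }
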
931 return insertSliceOpRequiresRead(cast<tensor::ParallelInsertSliceOp>(op),
1000 /// tensor.parallel_insert_slice op has implicit inplace behavior. We
1008 /// Bufferization of tensor.splat. Bufferizes to a new allocation that is filled
1009 /// with a linalg.map. Similar to tensor.generate.
1012 tensor::SplatOp> {
1019 auto splatOp = cast<tensor::SplatOp>(op);
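
A hedged sketch of the allocation-plus-linalg.map lowering described above; the names and the exact printed form are illustrative:

    // Tensor form:
    //   %0 = tensor.splat %v : tensor<8xf32>
    func.func @splat_example(%v: f32) -> tensor<8xf32> {
      %alloc = bufferization.alloc_tensor() : tensor<8xf32>
      %0 = linalg.map outs(%alloc : tensor<8xf32>)
          () {
            linalg.yield %v : f32
          }
      return %0 : tensor<8xf32>
    }
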
1051 } // namespace tensor
1054 void mlir::tensor::registerBufferizableOpInterfaceExternalModels(
1056 registry.addExtension(+[](MLIRContext *ctx, tensor::TensorDialect *dialect) {
1081 tensor::registerSubsetOpInterfaceExternalModels(registry);