// RUN: mlir-opt %s \
// RUN:   --pass-pipeline="builtin.module(transform-interpreter{ \
// RUN:        debug-bind-trailing-args=linalg.matmul,linalg.elemwise_binary},\
// RUN:        canonicalize,cse,symbol-dce)" \
// RUN:   --split-input-file --verify-diagnostics

// ****************************** IMPORTANT NOTE ******************************
//
// If you are changing this file, you may also need to change
// mlir/docs/Tutorials/Transform accordingly.
//
// ****************************************************************************

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(
      %arg0: !transform.any_op,
      // expected-note @below {{handle to invalidated ops}}
      %arg1: !transform.op<"linalg.matmul">,
      %arg2: !transform.op<"linalg.elemwise_binary">) {
    // The actual tiling transformation takes tile sizes as attributes.
    // expected-note @below {{invalidated by this transform op that consumes its operand #0 and invalidates all handles to payload IR entities associated with this operand and entities nested in them}}
    %tiled, %loop = transform.structured.tile_using_forall %arg1 tile_sizes [4, 32]
        : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)

    // This is trying to use an invalidated handle leading to undefined behavior.
    // expected-error @below {{uses a handle invalidated by a previously executed transform op}}
    transform.debug.emit_remark_at %arg1, "remark" : !transform.op<"linalg.matmul">
    transform.yield
  }
}

// Original function to optimize.
func.func @fc_relu(%lhs: tensor<512x512xf32>, %rhs: tensor<512x512xf32>,
                   %bias: tensor<512x512xf32>, %output: tensor<512x512xf32>)
                   -> tensor<512x512xf32> {
  // Matrix-matrix multiplication.
  // expected-note @below {{payload op}}
  %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
                          outs(%output: tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise addition.
  %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    ins(%matmul, %bias : tensor<512x512xf32>, tensor<512x512xf32>)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise max with 0 (ReLU).
  %c0f = arith.constant 0.0 : f32
  %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    ins(%biased, %c0f : tensor<512x512xf32>, f32)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>
  func.return %relued : tensor<512x512xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(
      %arg0: !transform.any_op,
      %arg1: !transform.op<"linalg.matmul">,
      %arg2: !transform.op<"linalg.elemwise_binary">) {
    // We can cast one type to another as long as operations are compatible
    // with both types. This creates "aliasing" handles.
    // expected-note @below {{handle to invalidated ops}}
    %casted = transform.cast %arg1 : !transform.op<"linalg.matmul"> to
        !transform.any_op

    // The actual tiling transformation takes tile sizes as attributes.
    // expected-note @below {{invalidated by this transform op that consumes its operand #0 and invalidates all handles to payload IR entities associated with this operand and entities nested in them}}
    %tiled, %loop = transform.structured.tile_using_forall %arg1 tile_sizes [4, 32]
        : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)

    // Consuming an operand invalidates the consumed handle and any other handle that is
    // associated with the same payload operations, or payload operations nested in them.
    // expected-error @below {{uses a handle invalidated by a previously executed transform op}}
    transform.debug.emit_remark_at %casted, "remark"
        : !transform.any_op
    transform.yield
  }
}

// Original function to optimize.
func.func @fc_relu(%lhs: tensor<512x512xf32>, %rhs: tensor<512x512xf32>,
                   %bias: tensor<512x512xf32>, %output: tensor<512x512xf32>)
                   -> tensor<512x512xf32> {
  // Matrix-matrix multiplication.
  // expected-note @below {{payload op}}
  %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
                          outs(%output: tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise addition.
  %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    ins(%matmul, %bias : tensor<512x512xf32>, tensor<512x512xf32>)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise max with 0 (ReLU).
  %c0f = arith.constant 0.0 : f32
  %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    ins(%biased, %c0f : tensor<512x512xf32>, f32)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>
  func.return %relued : tensor<512x512xf32>
}