// RUN: mlir-opt %s \
// RUN:   --pass-pipeline="builtin.module(transform-interpreter{ \
// RUN:        debug-bind-trailing-args=linalg.matmul,linalg.elemwise_binary},\
// RUN:        canonicalize,cse,symbol-dce)" \
// RUN:   --split-input-file --verify-diagnostics
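// The `debug-bind-trailing-args` interpreter option above associates the
// trailing arguments of @__transform_main with payload operations by name:
// %arg1 with every `linalg.matmul` and %arg2 with every
// `linalg.elemwise_binary` in the module. The remaining passes only clean up
// after the interpreter has run.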

// ****************************** IMPORTANT NOTE ******************************
//
// If you are changing this file, you may also need to change
// mlir/docs/Tutorials/Transform accordingly.
//
// ****************************************************************************

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(
      %arg0: !transform.any_op,
      // expected-note @below {{handle to invalidated ops}}
      %arg1: !transform.op<"linalg.matmul">,
      %arg2: !transform.op<"linalg.elemwise_binary">) {
    // The actual tiling transformation takes tile sizes as attributes.
    // expected-note @below {{invalidated by this transform op that consumes its operand #0 and invalidates all handles to payload IR entities associated with this operand and entities nested in them}}
    %tiled, %loop = transform.structured.tile_using_forall %arg1 tile_sizes [4, 32]
        : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)

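    // Tiling consumed %arg1: the original `linalg.matmul` payload op has been
    // rewritten into a tiled computation and erased, so any handle to it now
    // dangles.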
    // This is trying to use an invalidated handle leading to undefined behavior.
    // expected-error @below {{uses a handle invalidated by a previously executed transform op}}
    transform.debug.emit_remark_at %arg1, "remark" : !transform.op<"linalg.matmul">
    transform.yield
  }
}
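// For reference (not executed as part of this test), a sketch of how to keep
// referring to the tiled computation: use the handles produced by the tiling
// op rather than the consumed %arg1, e.g.
//
//   %tiled, %loop = transform.structured.tile_using_forall %arg1 tile_sizes [4, 32]
//       : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)
//   transform.debug.emit_remark_at %tiled, "tiled matmul" : !transform.any_op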

// Original function to optimize.
func.func @fc_relu(%lhs: tensor<512x512xf32>, %rhs: tensor<512x512xf32>,
                   %bias: tensor<512x512xf32>, %output: tensor<512x512xf32>)
                   -> tensor<512x512xf32> {
  // Matrix-matrix multiplication.
  // expected-note @below {{payload op}}
  %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
                          outs(%output: tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise addition.
  %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    ins(%matmul, %bias : tensor<512x512xf32>, tensor<512x512xf32>)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise max with 0 (ReLU).
  %c0f = arith.constant 0.0 : f32
  %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    ins(%biased, %c0f : tensor<512x512xf32>, f32)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>
  func.return %relued : tensor<512x512xf32>
}

// -----

module attributes {transform.with_named_sequence} {
  transform.named_sequence @__transform_main(
      %arg0: !transform.any_op,
      %arg1: !transform.op<"linalg.matmul">,
      %arg2: !transform.op<"linalg.elemwise_binary">) {
    // We can cast one type to another as long as operations are compatible
    // with both types. This creates "aliasing" handles.
    // expected-note @below {{handle to invalidated ops}}
    %casted = transform.cast %arg1 : !transform.op<"linalg.matmul"> to
        !transform.any_op
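    // %casted and %arg1 are now two handles associated with the same payload
    // `linalg.matmul` operation; consuming either one invalidates both.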

    // The actual tiling transformation takes tile sizes as attributes.
    // expected-note @below {{invalidated by this transform op that consumes its operand #0 and invalidates all handles to payload IR entities associated with this operand and entities nested in them}}
    %tiled, %loop = transform.structured.tile_using_forall %arg1 tile_sizes [4, 32]
      : (!transform.op<"linalg.matmul">) -> (!transform.any_op, !transform.any_op)

    // Consuming an operand invalidates the consumed handle and any other handle that is
    // associated with the same payload operations, or payload operations nested in them.
    // expected-error @below {{uses a handle invalidated by a previously executed transform op}}
    transform.debug.emit_remark_at %casted, "remark"
      : !transform.any_op
    transform.yield
  }
}

// Original function to optimize.
func.func @fc_relu(%lhs: tensor<512x512xf32>, %rhs: tensor<512x512xf32>,
                   %bias: tensor<512x512xf32>, %output: tensor<512x512xf32>)
                   -> tensor<512x512xf32> {
  // Matrix-matrix multiplication.
  // expected-note @below {{payload op}}
  %matmul = linalg.matmul ins(%lhs, %rhs: tensor<512x512xf32>, tensor<512x512xf32>)
                          outs(%output: tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise addition.
  %biased = linalg.elemwise_binary { fun = #linalg.binary_fn<add> }
    ins(%matmul, %bias : tensor<512x512xf32>, tensor<512x512xf32>)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>

  // Elementwise max with 0 (ReLU).
  %c0f = arith.constant 0.0 : f32
  %relued = linalg.elemwise_binary { fun = #linalg.binary_fn<max_signed> }
    ins(%biased, %c0f : tensor<512x512xf32>, f32)
    outs(%output : tensor<512x512xf32>) -> tensor<512x512xf32>
  func.return %relued : tensor<512x512xf32>
}