xref: /llvm-project/llvm/unittests/Analysis/TFUtilsTest.cpp (revision b51e844f7a4ca4a0cb976bd59bf8b5588d6f3be5)
1 //===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/Utils/TFUtils.h"
10 #include "llvm/AsmParser/Parser.h"
11 #include "llvm/IR/Dominators.h"
12 #include "llvm/IR/Instructions.h"
13 #include "llvm/IR/LLVMContext.h"
14 #include "llvm/IR/Module.h"
15 #include "llvm/Support/Path.h"
16 #include "llvm/Support/SourceMgr.h"
17 #include "llvm/Testing/Support/SupportHelpers.h"
18 #include "gtest/gtest.h"
19 
20 using namespace llvm;
21 
22 extern const char *TestMainArgv0;
23 
// NOTE! This test model is currently also used by the test/Transforms/Inline/ML
// tests - relevant if updating this model.
26 static std::string getModelPath() {
27   SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
28   llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
29   return std::string(InputsDir);
30 }
31 
32 // Test observable behavior when no model is provided.
33 TEST(TFUtilsTest, NoModel) {
34   TFModelEvaluator Evaluator("", {}, {});
35   EXPECT_FALSE(Evaluator.isValid());
36 }
37 
38 // Test we can correctly load a savedmodel and evaluate it.
39 TEST(TFUtilsTest, LoadAndExecuteTest) {
40   // We use the ir2native model for test. We know it has one feature of
41   // dimension (1, 214)
42   const static int64_t KnownSize = 214;
43   std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
44       "serving_default_input_1", {1, KnownSize})};
45   std::vector<TensorSpec> OutputSpecs{
46       TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
47 
48   TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
49   EXPECT_TRUE(Evaluator.isValid());
50 
51   int32_t *V = Evaluator.getInput<int32_t>(0);
52   // Fill it up with 1's, we know the output.
53   for (auto I = 0; I < KnownSize; ++I) {
54     V[I] = 1;
55   }
56   {
57     auto ER = Evaluator.evaluate();
58     EXPECT_TRUE(ER.hasValue());
59     float Ret = *ER->getTensorValue<float>(0);
60     EXPECT_EQ(static_cast<int64_t>(Ret), 80);
61     EXPECT_EQ(ER->getUntypedTensorValue(0),
62               reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
63   }
64   // The input vector should be unchanged
65   for (auto I = 0; I < KnownSize; ++I) {
66     EXPECT_EQ(V[I], 1);
67   }
68   // Zero-out the unused position '0' of the instruction histogram, which is
69   // after the first 9 calculated values. Should the the same result.
70   V[9] = 0;
71   {
72     auto ER = Evaluator.evaluate();
73     EXPECT_TRUE(ER.hasValue());
74     float Ret = *ER->getTensorValue<float>(0);
75     EXPECT_EQ(static_cast<int64_t>(Ret), 80);
76   }
77 }
78 
79 // Test incorrect input setup
80 TEST(TFUtilsTest, EvalError) {
81   // We use the ir2native model for test. We know it has one feature of
82   // dimension (1, 214)
83   const static int64_t KnownSize = 213;
84   std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
85       "serving_default_input_1", {1, KnownSize})};
86   std::vector<TensorSpec> OutputSpecs{
87       TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
88 
89   TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
90   EXPECT_TRUE(Evaluator.isValid());
91 
92   int32_t *V = Evaluator.getInput<int32_t>(0);
93   // Fill it up with 1's, we know the output.
94   for (auto I = 0; I < KnownSize; ++I) {
95     V[I] = 1;
96   }
97   auto ER = Evaluator.evaluate();
98   EXPECT_FALSE(ER.hasValue());
99   EXPECT_FALSE(Evaluator.isValid());
100 }
101 
102 TEST(TFUtilsTest, JSONParsing) {
103   auto Value = json::parse(
104       R"({"name": "tensor_name",
105         "port": 2,
106         "type": "int32_t",
107         "shape":[1,4]
108         })");
109   EXPECT_TRUE(!!Value);
110   LLVMContext Ctx;
111   Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
112   EXPECT_TRUE(Spec.hasValue());
113   EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
114 }
115 
116 TEST(TFUtilsTest, JSONParsingInvalidTensorType) {
117   auto Value = json::parse(
118       R"(
119         {"name": "tensor_name",
120         "port": 2,
121         "type": "no such type",
122         "shape":[1,4]
123         }
124       )");
125   EXPECT_TRUE(!!Value);
126   LLVMContext Ctx;
127   auto Spec = getTensorSpecFromJSON(Ctx, *Value);
128   EXPECT_FALSE(Spec.hasValue());
129 }
130 
131 TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
132   auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
133   auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
134   auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
135   auto Spec3DLarge = TensorSpec::createSpec<float>("Hi3", {2, 4, 10});
136   EXPECT_TRUE(Spec1D.isElementType<int16_t>());
137   EXPECT_FALSE(Spec3DLarge.isElementType<double>());
138   EXPECT_EQ(Spec1D.getElementCount(), 1U);
139   EXPECT_EQ(Spec2D.getElementCount(), 1U);
140   EXPECT_EQ(Spec1DLarge.getElementCount(), 10U);
141   EXPECT_EQ(Spec3DLarge.getElementCount(), 80U);
142   EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
143   EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
144 }
145 
// Golden-output test for Logger with per-row rewards: two rows of two
// features plus one reward each must print as the exact textual proto below.
TEST(TFUtilsTest, Logger) {
  std::vector<LoggedFeatureSpec> Features;
  // First feature has no alternate name, so it serializes as "the_float".
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  // Second feature serializes under "alternate_name", not "the_int".
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  // 'true' enables reward logging: each row gets a logReward call.
  Logger L(Features, Rewards, true);
  float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  int64_t F01[]{2, 3};

  // Row 1: both features, then its reward.
  L.logTensorValue(0, F00, 6);
  L.logTensorValue(1, F01, 2);
  L.logReward<float>(3.4);
  float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  int64_t F11[]{-2, -3};
  // Row 2.
  L.logTensorValue(0, F10, 6);
  L.logTensorValue(1, F11, 2);
  L.logReward<float>(-3.0);
  // Expected serialization: floats in scientific notation, ints as-is.
  const auto *Expected = R"(feature_lists: {
  feature_list: {
    key: "the_float" value: {
      feature: { float_list: { value: [0.000000e+00, 1.000000e-01, 2.000000e-01, 3.000000e-01, 4.000000e-01, 5.000000e-01] } }
      feature: { float_list: { value: [0.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00] } }
    }
  }
  feature_list: {
    key: "alternate_name" value: {
      feature: { int64_list: { value: [2, 3] } }
      feature: { int64_list: { value: [-2, -3] } }
    }
  }
  feature_list: {
    key: "reward" value: {
      feature: { float_list: { value: [3.400000e+00] } }
      feature: { float_list: { value: [-3.000000e+00] } }
    }
  }
}
)";
  std::string Result;
  raw_string_ostream OS(Result);
  L.print(OS);
  EXPECT_EQ(Result, Expected);
}
192 
// Golden-output test for Logger with reward logging disabled: same features
// as the Logger test above, but no "reward" feature_list in the output.
TEST(TFUtilsTest, LoggerNoReward) {
  std::vector<LoggedFeatureSpec> Features;
  // First feature has no alternate name, so it serializes as "the_float".
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  // Second feature serializes under "alternate_name", not "the_int".
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  // 'false' disables reward logging; no logReward calls are made.
  Logger L(Features, Rewards, false);
  float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  int64_t F01[]{2, 3};

  // Row 1.
  L.logTensorValue(0, F00, 6);
  L.logTensorValue(1, F01, 2);
  float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  int64_t F11[]{-2, -3};
  // Row 2.
  L.logTensorValue(0, F10, 6);
  L.logTensorValue(1, F11, 2);
  const auto *Expected = R"(feature_lists: {
  feature_list: {
    key: "the_float" value: {
      feature: { float_list: { value: [0.000000e+00, 1.000000e-01, 2.000000e-01, 3.000000e-01, 4.000000e-01, 5.000000e-01] } }
      feature: { float_list: { value: [0.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00] } }
    }
  }
  feature_list: {
    key: "alternate_name" value: {
      feature: { int64_list: { value: [2, 3] } }
      feature: { int64_list: { value: [-2, -3] } }
    }
  }
}
)";
  std::string Result;
  raw_string_ostream OS(Result);
  L.print(OS);
  EXPECT_EQ(Result, Expected);
}
231 
// With reward logging enabled but only logFinalReward called, all rows but
// the last get a zero reward and the last row gets the given value.
TEST(TFUtilsTest, LoggerFinalReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger L(Features, Rewards, true);
  // Log three rows without per-row rewards.
  for (size_t I = 0; I < 3; ++I) {
    float F = static_cast<float>(I);
    L.logTensorValue(0, &F);
    // NOTE(review): passes &I (size_t) for a feature declared int64_t —
    // presumably logTensorValue only reinterprets the bytes; confirm the
    // sizes match on all supported platforms.
    L.logTensorValue(1, &I);
  }
  // Backfills rewards: [0, 0, 3.14].
  L.logFinalReward<float>(3.14);
  const auto *Expected = R"(feature_lists: {
  feature_list: {
    key: "the_float" value: {
      feature: { float_list: { value: [0.000000e+00] } }
      feature: { float_list: { value: [1.000000e+00] } }
      feature: { float_list: { value: [2.000000e+00] } }
    }
  }
  feature_list: {
    key: "the_int" value: {
      feature: { int64_list: { value: [0] } }
      feature: { int64_list: { value: [1] } }
      feature: { int64_list: { value: [2] } }
    }
  }
  feature_list: {
    key: "reward" value: {
      feature: { float_list: { value: [0.000000e+00] } }
      feature: { float_list: { value: [0.000000e+00] } }
      feature: { float_list: { value: [3.140000e+00] } }
    }
  }
}
)";
  std::string Result;
  raw_string_ostream OS(Result);
  L.print(OS);
  EXPECT_EQ(Result, Expected);
}
274