//===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"

using namespace llvm;

extern const char *TestMainArgv0;

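// Locate the saved model shipped alongside the test. TestMainArgv0 is the
// test binary's path (defined by the unittest main harness), from which
// getInputFileDirectory derives the per-test Inputs directory containing the
// checked-in ir2native model.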
static std::string getModelPath() {
  SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
  llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
  return std::string(InputsDir);
}

// Test observable behavior when no model is provided.
TEST(TFUtilsTest, NoModel) {
  TFModelEvaluator Evaluator("", {}, {});
  EXPECT_FALSE(Evaluator.isValid());
}

// Test that we can correctly load a saved model and evaluate it.
TEST(TFUtilsTest, LoadAndExecuteTest) {
  // We use the ir2native model for this test. We know it has one feature of
  // dimension (1, 214).
  const static int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it with 1s; for this input we know the expected output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<size_t>(Ret), 80);
  }
  // The input vector should be unchanged.
  for (auto I = 0; I < KnownSize; ++I) {
    EXPECT_EQ(V[I], 1);
  }
  // Zero out the unused position '0' of the instruction histogram, which comes
  // after the first 9 calculated values. This should produce the same result.
  V[9] = 0;
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<size_t>(Ret), 80);
  }
}
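
// For reference, the client-side pattern exercised above: fetch the input
// buffer with getInput<T>(Index), populate it, call evaluate(), then read the
// result tensor off the returned object. A minimal sketch reusing this test's
// names (illustrative only, not an exhaustive API survey):
//
//   int32_t *Input = Evaluator.getInput<int32_t>(0);
//   std::fill(Input, Input + KnownSize, 1);
//   if (auto Result = Evaluator.evaluate())
//     float Prediction = *Result->getTensorValue<float>(0);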
// Test that evaluation fails when the input is set up incorrectly.
TEST(TFUtilsTest, EvalError) {
  // We use the ir2native model for this test. We know it has one feature of
  // dimension (1, 214), so declaring size 213 here is a deliberate mismatch.
  const static int64_t KnownSize = 213;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it with 1s; the values are irrelevant, as the mis-declared shape
  // should make evaluation fail.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  auto ER = Evaluator.evaluate();
  EXPECT_FALSE(ER.hasValue());
  EXPECT_FALSE(Evaluator.isValid());
}
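
// A failed evaluation also invalidates the evaluator (asserted above), so a
// cautious caller would re-check isValid() before issuing further requests.
// A hypothetical defensive pattern, reusing the names from this test:
//
//   if (auto Result = Evaluator.evaluate()) {
//     // ... consume Result ...
//   } else if (!Evaluator.isValid()) {
//     // ... stop using Evaluator; fall back to a non-ML code path ...
//   }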

TEST(TFUtilsTest, JSONParsing) {
  auto Value = json::parse(
      R"({"name": "tensor_name",
        "port": 2,
        "type": "int32",
        "shape":[1,4]
        })");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_TRUE(Spec.hasValue());
  EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
}
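
// Note how the JSON fields map onto the spec: "name" and "port" identify the
// tensor, "type" names the element type, and "shape" lists the dimensions,
// matching createSpec<int32_t>("tensor_name", {1, 4}, 2) above.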

TEST(TFUtilsTest, JSONParsingInvalidTensorType) {
  auto Value = json::parse(
      R"(
        {"name": "tensor_name",
        "port": 2,
        "type": "no such type",
        "shape":[1,4]
        }
      )");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  auto Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_FALSE(Spec.hasValue());
}
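
// On a malformed spec (here, an unrecognized "type"), getTensorSpecFromJSON
// returns None; the LLVMContext parameter presumably gives it a channel for
// reporting the parse error as a diagnostic.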

TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
  auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
  auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
  auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
  auto Spec3DLarge = TensorSpec::createSpec<float>("Hi4", {2, 4, 10});
  EXPECT_TRUE(Spec1D.isElementType<int16_t>());
  EXPECT_FALSE(Spec3DLarge.isElementType<double>());
  EXPECT_EQ(Spec1D.getElementCount(), 1);
  EXPECT_EQ(Spec2D.getElementCount(), 1);
  EXPECT_EQ(Spec1DLarge.getElementCount(), 10);
  EXPECT_EQ(Spec3DLarge.getElementCount(), 80);
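  // Note: getElementCount() is the product of all shape dimensions; for
  // Spec3DLarge that is 2 * 4 * 10 = 80.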
  EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
  EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
}