1caf395eeSMircea Trofin //===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
2caf395eeSMircea Trofin //
3caf395eeSMircea Trofin // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4caf395eeSMircea Trofin // See https://llvm.org/LICENSE.txt for license information.
5caf395eeSMircea Trofin // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6caf395eeSMircea Trofin //
7caf395eeSMircea Trofin //===----------------------------------------------------------------------===//
8caf395eeSMircea Trofin
9caf395eeSMircea Trofin #include "llvm/Analysis/Utils/TFUtils.h"
10c35ad9eeSMircea Trofin #include "llvm/Analysis/ModelUnderTrainingRunner.h"
11c35ad9eeSMircea Trofin #include "llvm/Analysis/TensorSpec.h"
12caf395eeSMircea Trofin #include "llvm/AsmParser/Parser.h"
13caf395eeSMircea Trofin #include "llvm/IR/Dominators.h"
14caf395eeSMircea Trofin #include "llvm/IR/Instructions.h"
15caf395eeSMircea Trofin #include "llvm/IR/LLVMContext.h"
16caf395eeSMircea Trofin #include "llvm/IR/Module.h"
17caf395eeSMircea Trofin #include "llvm/Support/Path.h"
18caf395eeSMircea Trofin #include "llvm/Support/SourceMgr.h"
19caf395eeSMircea Trofin #include "llvm/Testing/Support/SupportHelpers.h"
20caf395eeSMircea Trofin #include "gtest/gtest.h"
21caf395eeSMircea Trofin
22caf395eeSMircea Trofin using namespace llvm;
23caf395eeSMircea Trofin
24caf395eeSMircea Trofin extern const char *TestMainArgv0;
25caf395eeSMircea Trofin
26ca7973cfSMircea Trofin // NOTE! This test model is currently also used by test/Transforms/Inline/ML tests
27ca7973cfSMircea Trofin //- relevant if updating this model.
getModelPath()28caf395eeSMircea Trofin static std::string getModelPath() {
29caf395eeSMircea Trofin SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
30caf395eeSMircea Trofin llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
31caf395eeSMircea Trofin return std::string(InputsDir);
32caf395eeSMircea Trofin }
33caf395eeSMircea Trofin
34caf395eeSMircea Trofin // Test observable behavior when no model is provided.
TEST(TFUtilsTest,NoModel)35caf395eeSMircea Trofin TEST(TFUtilsTest, NoModel) {
36caf395eeSMircea Trofin TFModelEvaluator Evaluator("", {}, {});
37caf395eeSMircea Trofin EXPECT_FALSE(Evaluator.isValid());
38caf395eeSMircea Trofin }
39caf395eeSMircea Trofin
40caf395eeSMircea Trofin // Test we can correctly load a savedmodel and evaluate it.
TEST(TFUtilsTest,LoadAndExecuteTest)41caf395eeSMircea Trofin TEST(TFUtilsTest, LoadAndExecuteTest) {
42caf395eeSMircea Trofin // We use the ir2native model for test. We know it has one feature of
43caf395eeSMircea Trofin // dimension (1, 214)
44caf395eeSMircea Trofin const static int64_t KnownSize = 214;
4571059257SMircea Trofin std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
4671059257SMircea Trofin "serving_default_input_1", {1, KnownSize})};
4771059257SMircea Trofin std::vector<TensorSpec> OutputSpecs{
4871059257SMircea Trofin TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
49caf395eeSMircea Trofin
5071059257SMircea Trofin TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
51caf395eeSMircea Trofin EXPECT_TRUE(Evaluator.isValid());
52caf395eeSMircea Trofin
534f763b21SMircea Trofin int32_t *V = Evaluator.getInput<int32_t>(0);
54caf395eeSMircea Trofin // Fill it up with 1's, we know the output.
55caf395eeSMircea Trofin for (auto I = 0; I < KnownSize; ++I) {
56caf395eeSMircea Trofin V[I] = 1;
57caf395eeSMircea Trofin }
58caf395eeSMircea Trofin {
59caf395eeSMircea Trofin auto ER = Evaluator.evaluate();
601cd45630SKazu Hirata EXPECT_TRUE(ER.has_value());
61caf395eeSMircea Trofin float Ret = *ER->getTensorValue<float>(0);
6257d3e9cdSMircea Trofin EXPECT_EQ(static_cast<int64_t>(Ret), 80);
63b18c41c6SMircea Trofin EXPECT_EQ(ER->getUntypedTensorValue(0),
64b18c41c6SMircea Trofin reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
65caf395eeSMircea Trofin }
66caf395eeSMircea Trofin // The input vector should be unchanged
67caf395eeSMircea Trofin for (auto I = 0; I < KnownSize; ++I) {
68caf395eeSMircea Trofin EXPECT_EQ(V[I], 1);
69caf395eeSMircea Trofin }
70caf395eeSMircea Trofin // Zero-out the unused position '0' of the instruction histogram, which is
71caf395eeSMircea Trofin // after the first 9 calculated values. Should the the same result.
72caf395eeSMircea Trofin V[9] = 0;
73caf395eeSMircea Trofin {
74caf395eeSMircea Trofin auto ER = Evaluator.evaluate();
751cd45630SKazu Hirata EXPECT_TRUE(ER.has_value());
76caf395eeSMircea Trofin float Ret = *ER->getTensorValue<float>(0);
7757d3e9cdSMircea Trofin EXPECT_EQ(static_cast<int64_t>(Ret), 80);
78caf395eeSMircea Trofin }
79caf395eeSMircea Trofin }
80caf395eeSMircea Trofin
81caf395eeSMircea Trofin // Test incorrect input setup
TEST(TFUtilsTest,EvalError)82caf395eeSMircea Trofin TEST(TFUtilsTest, EvalError) {
83caf395eeSMircea Trofin // We use the ir2native model for test. We know it has one feature of
84caf395eeSMircea Trofin // dimension (1, 214)
85caf395eeSMircea Trofin const static int64_t KnownSize = 213;
8671059257SMircea Trofin std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
8771059257SMircea Trofin "serving_default_input_1", {1, KnownSize})};
8871059257SMircea Trofin std::vector<TensorSpec> OutputSpecs{
8971059257SMircea Trofin TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
90caf395eeSMircea Trofin
9171059257SMircea Trofin TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
92caf395eeSMircea Trofin EXPECT_FALSE(Evaluator.isValid());
93caf395eeSMircea Trofin }
944b1b109cSMircea Trofin
TEST(TFUtilsTest, UnsupportedFeature) {
  const static int64_t KnownSize = 214;
  // Declare the real model input plus one feature the model does not know
  // about; the runner should accept the spec list and ignore the extra one.
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("serving_default_input_1",
                                      {1, KnownSize}),
      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};

  LLVMContext Ctx;
  ModelUnderTrainingRunner Evaluator(
      Ctx, getModelPath(), InputSpecs,
      {TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})});
  EXPECT_TRUE(Evaluator.isValid());
  // All-ones input - we know the model output for it.
  int32_t *Input = Evaluator.getTensor<int32_t>(0);
  for (int64_t Idx = 0; Idx < KnownSize; ++Idx)
    Input[Idx] = 1;

  // Populate the unknown feature with distinctive values so we can verify
  // later that evaluation left it untouched.
  float *Extra = Evaluator.getTensor<float>(1);
  for (int Idx = 0; Idx != 2 * 5; ++Idx)
    Extra[Idx] = 3.14 + Idx;
  float Result = Evaluator.evaluate<float>();
  EXPECT_EQ(static_cast<int64_t>(Result), 80);
  // Neither input buffer should be mutated by evaluation.
  for (int64_t Idx = 0; Idx < KnownSize; ++Idx)
    EXPECT_EQ(Input[Idx], 1);
  for (int Idx = 0; Idx != 2 * 5; ++Idx)
    EXPECT_FLOAT_EQ(Extra[Idx], 3.14 + Idx);
}
123ec83c7e3SAiden Grossman
TEST(TFUtilsTest, MissingFeature) {
  // Provide no input specs at all; the model requires one, so construction
  // must yield an invalid evaluator.
  std::vector<TensorSpec> NoInputs;
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), NoInputs, OutputSpecs);
  EXPECT_FALSE(Evaluator.isValid());
}
132