//===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/Analysis/ModelUnderTrainingRunner.h"
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"

using namespace llvm;

extern const char *TestMainArgv0;

// NOTE! This test model is currently also used by test/Transforms/Inline/ML
// tests - relevant if updating this model.
static std::string getModelPath() {
  SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
  llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
  return std::string(InputsDir);
}

// Test observable behavior when no model is provided.
TEST(TFUtilsTest, NoModel) {
  TFModelEvaluator Evaluator("", {}, {});
  EXPECT_FALSE(Evaluator.isValid());
}

// Test we can correctly load a SavedModel and evaluate it.
TEST(TFUtilsTest, LoadAndExecuteTest) {
  // We use the ir2native model for the test. We know it has one feature of
  // dimension (1, 214).
  const static int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it up with 1s; we know the expected output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<int64_t>(Ret), 80);
    EXPECT_EQ(ER->getUntypedTensorValue(0),
              reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
  }
  // The input vector should be unchanged.
  for (auto I = 0; I < KnownSize; ++I) {
    EXPECT_EQ(V[I], 1);
  }
  // Zero out the unused position '0' of the instruction histogram, which is
  // after the first 9 calculated values. This should give the same result.
  V[9] = 0;
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<int64_t>(Ret), 80);
  }
}
// Test incorrect input setup.
TEST(TFUtilsTest, EvalError) {
  // We use the ir2native model for the test. We know it has one feature of
  // dimension (1, 214); deliberately declare the wrong size (213) so that
  // evaluation fails.
  const static int64_t KnownSize = 213;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it up with 1s.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  auto ER = Evaluator.evaluate();
  EXPECT_FALSE(ER.hasValue());
  EXPECT_FALSE(Evaluator.isValid());
}

TEST(TFUtilsTest, UnsupportedFeature) {
  const static int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("serving_default_input_1",
                                      {1, KnownSize}),
      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};

  LLVMContext Ctx;
  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
      {LoggedFeatureSpec{
          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
          None}});
  int32_t *V = Evaluator->getTensor<int32_t>(0);
  // Fill it up with 1s; we know the expected output.
  for (auto I = 0; I < KnownSize; ++I)
    V[I] = 1;

  float *F = Evaluator->getTensor<float>(1);
  for (auto I = 0; I < 2 * 5; ++I)
    F[I] = 3.14 + I;
  float Ret = Evaluator->evaluate<float>();
  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
  // The input vector should be unchanged.
  for (auto I = 0; I < KnownSize; ++I)
    EXPECT_EQ(V[I], 1);
  for (auto I = 0; I < 2 * 5; ++I)
    EXPECT_FLOAT_EQ(F[I], 3.14 + I);
}
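
// The tests above walk the TFModelEvaluator protocol piecemeal. As a compact
// reference, below is a minimal sketch of the whole call sequence in one
// place, using only APIs exercised in this file (TensorSpec::createSpec, the
// evaluator constructor, isValid, getInput, evaluate, getTensorValue). The
// helper's name and its factoring are illustrative assumptions, not part of
// the tested API; the spec names and the ir2native model match
// LoadAndExecuteTest.
LLVM_ATTRIBUTE_UNUSED
static Optional<float>
evaluateIr2NativeOnce(const std::vector<int32_t> &Features) {
  // One int32 input of shape (1, N) and one scalar float output, as in
  // LoadAndExecuteTest.
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1",
      {1, static_cast<int64_t>(Features.size())})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  if (!Evaluator.isValid())
    return None;
  // Copy the features into the evaluator-owned input buffer.
  int32_t *V = Evaluator.getInput<int32_t>(0);
  for (size_t I = 0; I < Features.size(); ++I)
    V[I] = Features[I];
  // evaluate() yields no result on failure; otherwise read the typed output.
  auto ER = Evaluator.evaluate();
  if (!ER.hasValue())
    return None;
  return *ER->getTensorValue<float>(0);
}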