//===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Utils/TFUtils.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"

using namespace llvm;

extern const char *TestMainArgv0;

// NOTE! This test model is currently also used by test/Transforms/Inline/ML
// tests - relevant if updating this model.
static std::string getModelPath() {
  SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
  llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
  return std::string(InputsDir);
}

// Test observable behavior when no model is provided.
TEST(TFUtilsTest, NoModel) {
  TFModelEvaluator Evaluator("", {}, {});
  EXPECT_FALSE(Evaluator.isValid());
}

// Test we can correctly load a SavedModel and evaluate it.
TEST(TFUtilsTest, LoadAndExecuteTest) {
  // We use the ir2native model for the test. We know it has one feature of
  // dimension (1, 214).
  const static int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it up with 1s; we know the output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<size_t>(Ret), 80);
    EXPECT_EQ(ER->getUntypedTensorValue(0),
              reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
  }
  // The input vector should be unchanged.
  for (auto I = 0; I < KnownSize; ++I) {
    EXPECT_EQ(V[I], 1);
  }
  // Zero out the unused position '0' of the instruction histogram, which is
  // after the first 9 calculated values. The result should be the same.
  V[9] = 0;
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<size_t>(Ret), 80);
  }
}

// Test incorrect input setup.
TEST(TFUtilsTest, EvalError) {
  // We use the ir2native model for the test. We know it has one feature of
  // dimension (1, 214); deliberately use the wrong size (213) here.
  const static int64_t KnownSize = 213;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill it up with 1s; we know the output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  auto ER = Evaluator.evaluate();
  EXPECT_FALSE(ER.hasValue());
  EXPECT_FALSE(Evaluator.isValid());
}

TEST(TFUtilsTest, JSONParsing) {
  auto Value = json::parse(
      R"({"name": "tensor_name",
        "port": 2,
        "type": "int32_t",
        "shape": [1, 4]
        })");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_TRUE(Spec.hasValue());
  EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
}

TEST(TFUtilsTest, JSONParsingInvalidTensorType) {
  auto Value = json::parse(
      R"(
        {"name": "tensor_name",
        "port": 2,
        "type": "no such type",
        "shape": [1, 4]
        }
      )");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  auto Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_FALSE(Spec.hasValue());
}

TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
  auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
  auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
  auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
  auto Spec3DLarge = TensorSpec::createSpec<float>("Hi3", {2, 4, 10});
  EXPECT_TRUE(Spec1D.isElementType<int16_t>());
  EXPECT_FALSE(Spec3DLarge.isElementType<double>());
  EXPECT_EQ(Spec1D.getElementCount(), 1);
  EXPECT_EQ(Spec2D.getElementCount(), 1);
  EXPECT_EQ(Spec1DLarge.getElementCount(), 10);
  EXPECT_EQ(Spec3DLarge.getElementCount(), 80);
  EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
  EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
}