//===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Utils/TFUtils.h"
#include "tensorflow/core/example/example.pb.h"
#include "tensorflow/core/example/feature.pb.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Testing/Support/SupportHelpers.h"
#include "gtest/gtest.h"

using namespace llvm;

extern const char *TestMainArgv0;

// NOTE! This test model is currently also used by test/Transforms/Inline/ML
// tests - relevant if updating this model.
static std::string getModelPath() {
  SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
  llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
  return std::string(InputsDir);
}

// Test observable behavior when no model is provided.
TEST(TFUtilsTest, NoModel) {
  TFModelEvaluator Evaluator("", {}, {});
  EXPECT_FALSE(Evaluator.isValid());
}

// Test that we can correctly load a SavedModel and evaluate it.
TEST(TFUtilsTest, LoadAndExecuteTest) {
  // We use the ir2native model for testing. We know it has one feature of
  // dimension (1, 214).
  static const int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill the input with 1s; we know the expected output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<int64_t>(Ret), 80);
    EXPECT_EQ(ER->getUntypedTensorValue(0),
              reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
  }
  // The input vector should be unchanged.
  for (auto I = 0; I < KnownSize; ++I) {
    EXPECT_EQ(V[I], 1);
  }
  // Zero out the unused position '0' of the instruction histogram, which comes
  // after the first 9 calculated values. The result should be the same.
  V[9] = 0;
  {
    auto ER = Evaluator.evaluate();
    EXPECT_TRUE(ER.hasValue());
    float Ret = *ER->getTensorValue<float>(0);
    EXPECT_EQ(static_cast<int64_t>(Ret), 80);
  }
}

// Test incorrect input setup.
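// The spec below declares an input of 213 columns while the saved model
// expects 214, so the model loads successfully but evaluation fails at run
// time, invalidating the evaluator.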
TEST(TFUtilsTest, EvalError) {
  // We use the ir2native model for testing. We know it has one feature of
  // dimension (1, 214).
  static const int64_t KnownSize = 213;
  std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
      "serving_default_input_1", {1, KnownSize})};
  std::vector<TensorSpec> OutputSpecs{
      TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};

  TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
  EXPECT_TRUE(Evaluator.isValid());

  int32_t *V = Evaluator.getInput<int32_t>(0);
  // Fill the input with 1s; we know the expected output.
  for (auto I = 0; I < KnownSize; ++I) {
    V[I] = 1;
  }
  auto ER = Evaluator.evaluate();
  EXPECT_FALSE(ER.hasValue());
  EXPECT_FALSE(Evaluator.isValid());
}

TEST(TFUtilsTest, JSONParsing) {
  auto Value = json::parse(
      R"({"name": "tensor_name",
        "port": 2,
        "type": "int32_t",
        "shape": [1, 4]
        })");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_TRUE(Spec.hasValue());
  EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
}

TEST(TFUtilsTest, JSONParsingInvalidTensorType) {
  auto Value = json::parse(
      R"(
        {"name": "tensor_name",
        "port": 2,
        "type": "no such type",
        "shape": [1, 4]
        }
      )");
  EXPECT_TRUE(!!Value);
  LLVMContext Ctx;
  auto Spec = getTensorSpecFromJSON(Ctx, *Value);
  EXPECT_FALSE(Spec.hasValue());
}

TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
  auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
  auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
  auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
  auto Spec3DLarge = TensorSpec::createSpec<float>("Hi3", {2, 4, 10});
  EXPECT_TRUE(Spec1D.isElementType<int16_t>());
  EXPECT_FALSE(Spec3DLarge.isElementType<double>());
  EXPECT_EQ(Spec1D.getElementCount(), 1U);
  EXPECT_EQ(Spec2D.getElementCount(), 1U);
  EXPECT_EQ(Spec1DLarge.getElementCount(), 10U);
  EXPECT_EQ(Spec3DLarge.getElementCount(), 80U);
  EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
  EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
}
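// Checks one logged feature at a given timestep against an expected C array:
// FNAME names the feature list in the parsed SequenceExample, TYPE is the
// proto value accessor (float_list or int64_list), INDEX is the timestep, and
// EXP holds the expected values.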
#define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
  do {                                                                         \
    const auto &V = Expected.feature_lists()                                   \
                        .feature_list()                                        \
                        .at(FNAME)                                             \
                        .feature(INDEX)                                        \
                        .TYPE()                                                \
                        .value();                                              \
    for (auto I = 0; I < V.size(); ++I)                                        \
      EXPECT_EQ(V.at(I), EXP[I]);                                              \
  } while (false)

TEST(TFUtilsTest, Logger) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger L(Features, Rewards, true);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t F01[]{2, 3};

  L.logFloatValue(0, F00);
  L.logInt64Value(1, F01);
  L.logFloatReward(3.4);
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt64Value(1, F11);
  L.logFloatReward(-3.0);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);

  tensorflow::SequenceExample Expected;
  EXPECT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
  float R0[]{3.4};
  float R1[]{-3.0};
  PROTO_CHECKER("reward", float_list, 0, R0);
  PROTO_CHECKER("reward", float_list, 1, R1);
}

TEST(TFUtilsTest, LoggerInt32FeaturesAndReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int32_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<int32_t>("reward", {1});
  Logger L(Features, Rewards, true);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int32_t F01[]{2, 3};

  L.logFloatValue(0, F00);
  L.logInt32Value(1, F01);
  L.logInt32Reward(3);
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int32_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt32Value(1, F11);
  L.logInt32Reward(-3);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);

  tensorflow::SequenceExample Expected;
  EXPECT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  // int32 features and rewards are serialized as int64 lists in the proto.
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
  int32_t R0[]{3};
  int32_t R1[]{-3};
  PROTO_CHECKER("reward", int64_list, 0, R0);
  PROTO_CHECKER("reward", int64_list, 1, R1);
}

TEST(TFUtilsTest, LoggerNoReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger L(Features, Rewards, false);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t F01[]{2, 3};

  L.logFloatValue(0, F00);
  L.logInt64Value(1, F01);
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt64Value(1, F11);

  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);
  tensorflow::SequenceExample Expected;
  EXPECT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
}

TEST(TFUtilsTest, LoggerFinalReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger L(Features, Rewards, true);
  for (int64_t I = 0; I < 3; ++I) {
    float F = static_cast<float>(I);
    L.logFloatValue(0, &F);
    L.logInt64Value(1, &I);
  }
  L.logFloatFinalReward(3.14);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);
  // With a final reward, every timestep but the last gets a zero reward.
  const float Zero[]{0.0};
  const float R[]{3.14};
  tensorflow::SequenceExample Expected;
  EXPECT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("reward", float_list, 0, Zero);
  PROTO_CHECKER("reward", float_list, 1, Zero);
  PROTO_CHECKER("reward", float_list, 2, R);
}