1 //===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/Analysis/Utils/TFUtils.h"
10 #include "google/protobuf/struct.pb.h"
11 #include "tensorflow/core/example/example.pb.h"
12 #include "tensorflow/core/example/feature.pb.h"
13 #include "llvm/AsmParser/Parser.h"
14 #include "llvm/IR/Dominators.h"
15 #include "llvm/IR/Instructions.h"
16 #include "llvm/IR/LLVMContext.h"
17 #include "llvm/IR/Module.h"
18 #include "llvm/Support/Path.h"
19 #include "llvm/Support/SourceMgr.h"
20 #include "llvm/Testing/Support/SupportHelpers.h"
21 #include "gtest/gtest.h"
22 
23 using namespace llvm;
24 
25 extern const char *TestMainArgv0;
26 
// NOTE! This test model is currently also used by the test/Transforms/Inline/ML
// tests - keep those in mind when updating this model.
29 static std::string getModelPath() {
30   SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
31   llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
32   return std::string(InputsDir);
33 }
34 
35 // Test observable behavior when no model is provided.
36 TEST(TFUtilsTest, NoModel) {
37   TFModelEvaluator Evaluator("", {}, {});
38   EXPECT_FALSE(Evaluator.isValid());
39 }
40 
41 // Test we can correctly load a savedmodel and evaluate it.
42 TEST(TFUtilsTest, LoadAndExecuteTest) {
43   // We use the ir2native model for test. We know it has one feature of
44   // dimension (1, 214)
45   const static int64_t KnownSize = 214;
46   std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
47       "serving_default_input_1", {1, KnownSize})};
48   std::vector<TensorSpec> OutputSpecs{
49       TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
50 
51   TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
52   EXPECT_TRUE(Evaluator.isValid());
53 
54   int32_t *V = Evaluator.getInput<int32_t>(0);
55   // Fill it up with 1's, we know the output.
56   for (auto I = 0; I < KnownSize; ++I) {
57     V[I] = 1;
58   }
59   {
60     auto ER = Evaluator.evaluate();
61     EXPECT_TRUE(ER.hasValue());
62     float Ret = *ER->getTensorValue<float>(0);
63     EXPECT_EQ(static_cast<int64_t>(Ret), 80);
64     EXPECT_EQ(ER->getUntypedTensorValue(0),
65               reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
66   }
67   // The input vector should be unchanged
68   for (auto I = 0; I < KnownSize; ++I) {
69     EXPECT_EQ(V[I], 1);
70   }
71   // Zero-out the unused position '0' of the instruction histogram, which is
72   // after the first 9 calculated values. Should the the same result.
73   V[9] = 0;
74   {
75     auto ER = Evaluator.evaluate();
76     EXPECT_TRUE(ER.hasValue());
77     float Ret = *ER->getTensorValue<float>(0);
78     EXPECT_EQ(static_cast<int64_t>(Ret), 80);
79   }
80 }
81 
82 // Test incorrect input setup
83 TEST(TFUtilsTest, EvalError) {
84   // We use the ir2native model for test. We know it has one feature of
85   // dimension (1, 214)
86   const static int64_t KnownSize = 213;
87   std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
88       "serving_default_input_1", {1, KnownSize})};
89   std::vector<TensorSpec> OutputSpecs{
90       TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
91 
92   TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
93   EXPECT_TRUE(Evaluator.isValid());
94 
95   int32_t *V = Evaluator.getInput<int32_t>(0);
96   // Fill it up with 1's, we know the output.
97   for (auto I = 0; I < KnownSize; ++I) {
98     V[I] = 1;
99   }
100   auto ER = Evaluator.evaluate();
101   EXPECT_FALSE(ER.hasValue());
102   EXPECT_FALSE(Evaluator.isValid());
103 }
104 
105 TEST(TFUtilsTest, JSONParsing) {
106   auto Value = json::parse(
107       R"({"name": "tensor_name",
108         "port": 2,
109         "type": "int32_t",
110         "shape":[1,4]
111         })");
112   EXPECT_TRUE(!!Value);
113   LLVMContext Ctx;
114   Optional<TensorSpec> Spec = getTensorSpecFromJSON(Ctx, *Value);
115   EXPECT_TRUE(Spec.hasValue());
116   EXPECT_EQ(*Spec, TensorSpec::createSpec<int32_t>("tensor_name", {1, 4}, 2));
117 }
118 
119 TEST(TFUtilsTest, JSONParsingInvalidTensorType) {
120   auto Value = json::parse(
121       R"(
122         {"name": "tensor_name",
123         "port": 2,
124         "type": "no such type",
125         "shape":[1,4]
126         }
127       )");
128   EXPECT_TRUE(!!Value);
129   LLVMContext Ctx;
130   auto Spec = getTensorSpecFromJSON(Ctx, *Value);
131   EXPECT_FALSE(Spec.hasValue());
132 }
133 
134 TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
135   auto Spec1D = TensorSpec::createSpec<int16_t>("Hi1", {1});
136   auto Spec2D = TensorSpec::createSpec<int16_t>("Hi2", {1, 1});
137   auto Spec1DLarge = TensorSpec::createSpec<float>("Hi3", {10});
138   auto Spec3DLarge = TensorSpec::createSpec<float>("Hi3", {2, 4, 10});
139   EXPECT_TRUE(Spec1D.isElementType<int16_t>());
140   EXPECT_FALSE(Spec3DLarge.isElementType<double>());
141   EXPECT_EQ(Spec1D.getElementCount(), 1U);
142   EXPECT_EQ(Spec2D.getElementCount(), 1U);
143   EXPECT_EQ(Spec1DLarge.getElementCount(), 10U);
144   EXPECT_EQ(Spec3DLarge.getElementCount(), 80U);
145   EXPECT_EQ(Spec3DLarge.getElementByteSize(), sizeof(float));
146   EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
147 }
148 
// Compares every element of feature INDEX of the feature list named FNAME
// against the expected array EXP. TYPE selects the proto value accessor
// (float_list or int64_list). NOTE: this macro reads a variable named
// `Expected` (a tensorflow::SequenceExample) that must be in scope at the
// expansion site.
#define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
  do {                                                                         \
    const auto &V = Expected.feature_lists()                                   \
                        .feature_list()                                        \
                        .at(FNAME)                                             \
                        .feature(INDEX)                                        \
                        .TYPE()                                                \
                        .value();                                              \
    for (auto I = 0; I < V.size(); ++I)                                        \
      EXPECT_EQ(V.at(I), EXP[I]);                                              \
  } while (false)
160 
// Log two rows of float/int64 features with per-step float rewards, flush,
// and check the serialized SequenceExample round-trips all values. Also
// checks a feature with an alternate logging name ("alternate_name") is
// serialized under that name rather than its spec name ("the_int").
TEST(TFUtilsTest, Logger) {
  std::vector<LoggedFeatureSpec> Features;
  // Feature 0: 2x3 float tensor, logged under its own spec name.
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  // Feature 1: int64 pair, logged under "alternate_name".
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  // Third argument presumably enables per-step reward logging — the
  // LoggerNoReward test passes false and logs no rewards.
  Logger L(Features, Rewards, true);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t F01[]{2, 3};

  // First step: both features plus a reward.
  L.logFloatValue(0, F00);
  L.logInt64Value(1, F01);
  L.logFloatReward(3.4);
  // Second step.
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt64Value(1, F11);
  L.logFloatReward(-3.0);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);

  // The flushed bytes must parse as a SequenceExample holding both rows of
  // each feature and both rewards.
  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
  float R0[]{3.4};
  float R1[]{-3.0};
  PROTO_CHECKER("reward", float_list, 0, R0);
  PROTO_CHECKER("reward", float_list, 1, R1);
}
196 
// Same round-trip as the Logger test, but with int32 features and rewards.
// The checkers read int64_list, showing int32 values are serialized into
// the proto's int64 list representation.
TEST(TFUtilsTest, LoggerInt32FeaturesAndReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  // int32 feature, logged under "alternate_name".
  Features.push_back({TensorSpec::createSpec<int32_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<int32_t>("reward", {1});
  Logger L(Features, Rewards, true);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int32_t F01[]{2, 3};

  // First step.
  L.logFloatValue(0, F00);
  L.logInt32Value(1, F01);
  L.logInt32Reward(3);
  // Second step.
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int32_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt32Value(1, F11);
  L.logInt32Reward(-3);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);

  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
  int32_t R0[]{3};
  int32_t R1[]{-3};
  PROTO_CHECKER("reward", int64_list, 0, R0);
  PROTO_CHECKER("reward", int64_list, 1, R1);
}
232 
// Same feature logging as the Logger test, but constructed with the reward
// flag set to false and no rewards logged; only the features are checked in
// the flushed output.
TEST(TFUtilsTest, LoggerNoReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  // 'false' here — contrast with the Logger test, which passes true and
  // logs per-step rewards.
  Logger L(Features, Rewards, false);
  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t F01[]{2, 3};

  // Two steps of features only.
  L.logFloatValue(0, F00);
  L.logInt64Value(1, F01);
  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t F11[]{-2, -3};
  L.logFloatValue(0, F10);
  L.logInt64Value(1, F11);

  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);
  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Result));
  PROTO_CHECKER("the_float", float_list, 0, F00);
  PROTO_CHECKER("the_float", float_list, 1, F10);
  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
}
262 
// Log three steps and only a final reward: the flushed proto must carry a
// 0.0 reward for the first two steps and the final value (3.14) on the last.
TEST(TFUtilsTest, LoggerFinalReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger L(Features, Rewards, true);
  // Three steps of scalar features, no per-step rewards.
  for (int64_t I = 0; I < 3; ++I) {
    float F = static_cast<float>(I);
    L.logFloatValue(0, &F);
    L.logInt64Value(1, &I);
  }
  L.logFloatFinalReward(3.14);
  std::string Result;
  raw_string_ostream OS(Result);
  L.flush(OS);
  const float Zero[]{0.0};
  const float R[]{3.14};
  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Result));
  // Earlier steps get a zero reward; only the last carries the final value.
  PROTO_CHECKER("reward", float_list, 0, Zero);
  PROTO_CHECKER("reward", float_list, 1, Zero);
  PROTO_CHECKER("reward", float_list, 2, R);
}
287 
288 TEST(TFUtilsTest, LoggerGroup) {
289   std::vector<LoggedFeatureSpec> Features;
290   Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
291   Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});
292 
293   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
294   StringMap<std::unique_ptr<Logger>> Loggers;
295   std::vector<std::string> Names{"a", "b"};
296   size_t Bump = 0;
297   for (auto Name : Names) {
298     auto L = std::make_unique<Logger>(Features, Rewards, true);
299     for (int64_t I = 0; I < 3; ++I) {
300       float F = static_cast<float>(I) + Bump;
301       L->logFloatValue(0, &F);
302       L->logInt64Value(1, &I);
303     }
304     L->logFloatFinalReward(3.14 + Bump);
305     Loggers.insert(std::make_pair(Name, std::move(L)));
306   }
307   std::string Result;
308   raw_string_ostream OS(Result);
309   Logger::flushLogs(OS, Loggers);
310   google::protobuf::Struct Expected;
311   ASSERT_TRUE(Expected.ParseFromString(Result));
312   EXPECT_EQ(Expected.fields_size(), 2);
313   EXPECT_TRUE(Expected.fields().contains("a"));
314   EXPECT_TRUE(Expected.fields().contains("b"));
315 }
316