1 //===- TFUtilsTest.cpp - test for TFUtils ---------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8
9 #include "llvm/Analysis/Utils/TFUtils.h"
10 #include "google/protobuf/struct.pb.h"
11 #include "tensorflow/core/example/example.pb.h"
12 #include "tensorflow/core/example/feature.pb.h"
13 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
14 #include "llvm/Analysis/TensorSpec.h"
15 #include "llvm/AsmParser/Parser.h"
16 #include "llvm/IR/Dominators.h"
17 #include "llvm/IR/Instructions.h"
18 #include "llvm/IR/LLVMContext.h"
19 #include "llvm/IR/Module.h"
20 #include "llvm/Support/Path.h"
21 #include "llvm/Support/SourceMgr.h"
22 #include "llvm/Testing/Support/SupportHelpers.h"
23 #include "gtest/gtest.h"
24
25 using namespace llvm;
26
27 extern const char *TestMainArgv0;
28
// NOTE! This test model is currently also used by test/Transforms/Inline/ML
// tests - relevant if updating this model.
getModelPath()31 static std::string getModelPath() {
32 SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
33 llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
34 return std::string(InputsDir);
35 }
36
37 // Test observable behavior when no model is provided.
TEST(TFUtilsTest,NoModel)38 TEST(TFUtilsTest, NoModel) {
39 TFModelEvaluator Evaluator("", {}, {});
40 EXPECT_FALSE(Evaluator.isValid());
41 }
42
43 // Test we can correctly load a savedmodel and evaluate it.
TEST(TFUtilsTest,LoadAndExecuteTest)44 TEST(TFUtilsTest, LoadAndExecuteTest) {
45 // We use the ir2native model for test. We know it has one feature of
46 // dimension (1, 214)
47 const static int64_t KnownSize = 214;
48 std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
49 "serving_default_input_1", {1, KnownSize})};
50 std::vector<TensorSpec> OutputSpecs{
51 TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
52
53 TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
54 EXPECT_TRUE(Evaluator.isValid());
55
56 int32_t *V = Evaluator.getInput<int32_t>(0);
57 // Fill it up with 1's, we know the output.
58 for (auto I = 0; I < KnownSize; ++I) {
59 V[I] = 1;
60 }
61 {
62 auto ER = Evaluator.evaluate();
63 EXPECT_TRUE(ER.hasValue());
64 float Ret = *ER->getTensorValue<float>(0);
65 EXPECT_EQ(static_cast<int64_t>(Ret), 80);
66 EXPECT_EQ(ER->getUntypedTensorValue(0),
67 reinterpret_cast<const void *>(ER->getTensorValue<float>(0)));
68 }
69 // The input vector should be unchanged
70 for (auto I = 0; I < KnownSize; ++I) {
71 EXPECT_EQ(V[I], 1);
72 }
73 // Zero-out the unused position '0' of the instruction histogram, which is
74 // after the first 9 calculated values. Should the the same result.
75 V[9] = 0;
76 {
77 auto ER = Evaluator.evaluate();
78 EXPECT_TRUE(ER.hasValue());
79 float Ret = *ER->getTensorValue<float>(0);
80 EXPECT_EQ(static_cast<int64_t>(Ret), 80);
81 }
82 }
83
84 // Test incorrect input setup
TEST(TFUtilsTest,EvalError)85 TEST(TFUtilsTest, EvalError) {
86 // We use the ir2native model for test. We know it has one feature of
87 // dimension (1, 214)
88 const static int64_t KnownSize = 213;
89 std::vector<TensorSpec> InputSpecs{TensorSpec::createSpec<int32_t>(
90 "serving_default_input_1", {1, KnownSize})};
91 std::vector<TensorSpec> OutputSpecs{
92 TensorSpec::createSpec<float>("StatefulPartitionedCall", {1})};
93
94 TFModelEvaluator Evaluator(getModelPath(), InputSpecs, OutputSpecs);
95 EXPECT_TRUE(Evaluator.isValid());
96
97 int32_t *V = Evaluator.getInput<int32_t>(0);
98 // Fill it up with 1's, we know the output.
99 for (auto I = 0; I < KnownSize; ++I) {
100 V[I] = 1;
101 }
102 auto ER = Evaluator.evaluate();
103 EXPECT_FALSE(ER.hasValue());
104 EXPECT_FALSE(Evaluator.isValid());
105 }
106
// A declared input feature the model does not actually have is tolerated:
// evaluation still produces the expected result and leaves both buffers
// untouched.
TEST(TFUtilsTest, UnsupportedFeature) {
  const static int64_t KnownSize = 214;
  std::vector<TensorSpec> InputSpecs{
      TensorSpec::createSpec<int32_t>("serving_default_input_1",
                                      {1, KnownSize}),
      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};

  LLVMContext Ctx;
  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
      {LoggedFeatureSpec{
          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
          None}});
  // Fill the known feature with 1s; the expected output for that input is
  // known (same as LoadAndExecuteTest).
  int32_t *KnownInput = Evaluator->getTensor<int32_t>(0);
  for (int64_t I = 0; I < KnownSize; ++I)
    KnownInput[I] = 1;

  // Give the nonexistent feature distinguishable values so we can check it
  // is left alone.
  float *ExtraInput = Evaluator->getTensor<float>(1);
  for (int64_t I = 0; I < 2 * 5; ++I)
    ExtraInput[I] = 3.14 + I;
  float Ret = Evaluator->evaluate<float>();
  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
  // Neither input buffer should be changed by evaluation.
  for (int64_t I = 0; I < KnownSize; ++I)
    EXPECT_EQ(KnownInput[I], 1);
  for (int64_t I = 0; I < 2 * 5; ++I)
    EXPECT_FLOAT_EQ(ExtraInput[I], 3.14 + I);
}
136
// Check that, in the SequenceExample proto named `Expected` (which must be in
// scope at the expansion site), the feature list FNAME has, at step INDEX, a
// value list of kind TYPE (e.g. float_list, int64_list) whose elements match
// the expected array EXP element-wise.
#define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
  do {                                                                         \
    const auto &V = Expected.feature_lists()                                   \
                        .feature_list()                                        \
                        .at(FNAME)                                             \
                        .feature(INDEX)                                        \
                        .TYPE()                                                \
                        .value();                                              \
    for (auto I = 0; I < V.size(); ++I)                                        \
      EXPECT_EQ(V.at(I), EXP[I]);                                              \
  } while (false)
148
// Logged float features, int64 features (under an alternate name), and
// per-step float rewards must round-trip through the Logger into a parseable
// SequenceExample.
TEST(TFUtilsTest, Logger) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger Log(Features, Rewards, true);
  // First observation.
  const float Step0Floats[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t Step0Ints[]{2, 3};
  Log.logFloatValue(0, Step0Floats);
  Log.logInt64Value(1, Step0Ints);
  Log.logFloatReward(3.4);
  // Second observation.
  const float Step1Floats[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t Step1Ints[]{-2, -3};
  Log.logFloatValue(0, Step1Floats);
  Log.logInt64Value(1, Step1Ints);
  Log.logFloatReward(-3.0);

  std::string Serialized;
  raw_string_ostream OutStream(Serialized);
  Log.flush(OutStream);

  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Serialized));
  // The int feature is serialized under its alternate name.
  PROTO_CHECKER("the_float", float_list, 0, Step0Floats);
  PROTO_CHECKER("the_float", float_list, 1, Step1Floats);
  PROTO_CHECKER("alternate_name", int64_list, 0, Step0Ints);
  PROTO_CHECKER("alternate_name", int64_list, 1, Step1Ints);
  const float Reward0[]{3.4};
  const float Reward1[]{-3.0};
  PROTO_CHECKER("reward", float_list, 0, Reward0);
  PROTO_CHECKER("reward", float_list, 1, Reward1);
}
184
// Same round-trip as the Logger test, but with int32 features and rewards;
// the proto checks read them back as int64 lists.
TEST(TFUtilsTest, LoggerInt32FeaturesAndReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int32_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<int32_t>("reward", {1});
  Logger Log(Features, Rewards, true);
  // First observation.
  const float Step0Floats[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int32_t Step0Ints[]{2, 3};
  Log.logFloatValue(0, Step0Floats);
  Log.logInt32Value(1, Step0Ints);
  Log.logInt32Reward(3);
  // Second observation.
  const float Step1Floats[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int32_t Step1Ints[]{-2, -3};
  Log.logFloatValue(0, Step1Floats);
  Log.logInt32Value(1, Step1Ints);
  Log.logInt32Reward(-3);

  std::string Serialized;
  raw_string_ostream OutStream(Serialized);
  Log.flush(OutStream);

  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Serialized));
  PROTO_CHECKER("the_float", float_list, 0, Step0Floats);
  PROTO_CHECKER("the_float", float_list, 1, Step1Floats);
  PROTO_CHECKER("alternate_name", int64_list, 0, Step0Ints);
  PROTO_CHECKER("alternate_name", int64_list, 1, Step1Ints);
  const int32_t Reward0[]{3};
  const int32_t Reward1[]{-3};
  PROTO_CHECKER("reward", int64_list, 0, Reward0);
  PROTO_CHECKER("reward", int64_list, 1, Reward1);
}
220
// With the Logger's IncludeReward flag off (third ctor argument), features
// alone must still round-trip through serialization.
TEST(TFUtilsTest, LoggerNoReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back(
      {TensorSpec::createSpec<float>("the_float", {2, 3}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {2}),
                      std::string("alternate_name")});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger Log(Features, Rewards, false);
  // Two observations; no rewards are logged.
  const float Step0Floats[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
  const int64_t Step0Ints[]{2, 3};
  Log.logFloatValue(0, Step0Floats);
  Log.logInt64Value(1, Step0Ints);
  const float Step1Floats[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
  const int64_t Step1Ints[]{-2, -3};
  Log.logFloatValue(0, Step1Floats);
  Log.logInt64Value(1, Step1Ints);

  std::string Serialized;
  raw_string_ostream OutStream(Serialized);
  Log.flush(OutStream);
  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Serialized));
  PROTO_CHECKER("the_float", float_list, 0, Step0Floats);
  PROTO_CHECKER("the_float", float_list, 1, Step1Floats);
  PROTO_CHECKER("alternate_name", int64_list, 0, Step0Ints);
  PROTO_CHECKER("alternate_name", int64_list, 1, Step1Ints);
}
250
// A final reward logged once at the end must be expanded so that only the
// last step carries it; earlier steps get zero.
TEST(TFUtilsTest, LoggerFinalReward) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  Logger Log(Features, Rewards, true);
  for (int64_t Step = 0; Step < 3; ++Step) {
    float AsFloat = static_cast<float>(Step);
    Log.logFloatValue(0, &AsFloat);
    Log.logInt64Value(1, &Step);
  }
  Log.logFloatFinalReward(3.14);
  std::string Serialized;
  raw_string_ostream OutStream(Serialized);
  Log.flush(OutStream);
  const float Zero[]{0.0};
  const float FinalReward[]{3.14};
  tensorflow::SequenceExample Expected;
  ASSERT_TRUE(Expected.ParseFromString(Serialized));
  PROTO_CHECKER("reward", float_list, 0, Zero);
  PROTO_CHECKER("reward", float_list, 1, Zero);
  PROTO_CHECKER("reward", float_list, 2, FinalReward);
}
275
// Multiple named loggers flushed together must serialize into a protobuf
// Struct keyed by logger name.
TEST(TFUtilsTest, LoggerGroup) {
  std::vector<LoggedFeatureSpec> Features;
  Features.push_back({TensorSpec::createSpec<float>("the_float", {1}), None});
  Features.push_back({TensorSpec::createSpec<int64_t>("the_int", {1}), None});

  auto Rewards = TensorSpec::createSpec<float>("reward", {1});
  StringMap<std::unique_ptr<Logger>> Loggers;
  const std::vector<std::string> Names{"a", "b"};
  // NOTE(review): Bump is never incremented, so both loggers record identical
  // data; the assertions below only check the Struct's keys, so this does not
  // affect the test - confirm whether a per-logger offset was intended.
  size_t Bump = 0;
  for (const auto &Name : Names) {
    auto SubLogger = std::make_unique<Logger>(Features, Rewards, true);
    for (int64_t Step = 0; Step < 3; ++Step) {
      float AsFloat = static_cast<float>(Step) + Bump;
      SubLogger->logFloatValue(0, &AsFloat);
      SubLogger->logInt64Value(1, &Step);
    }
    SubLogger->logFloatFinalReward(3.14 + Bump);
    Loggers.insert(std::make_pair(Name, std::move(SubLogger)));
  }
  std::string Serialized;
  raw_string_ostream OutStream(Serialized);
  Logger::flushLogs(OutStream, Loggers);
  google::protobuf::Struct Expected;
  ASSERT_TRUE(Expected.ParseFromString(Serialized));
  // One field per logger, keyed by name.
  EXPECT_EQ(Expected.fields_size(), 2);
  EXPECT_TRUE(Expected.fields().contains("a"));
  EXPECT_TRUE(Expected.fields().contains("b"));
}
304