forked from openvinotoolkit/openvino
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: main.cpp
More file actions
314 lines (276 loc) · 13.7 KB
/
main.cpp
File metadata and controls
314 lines (276 loc) · 13.7 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
//
// Copyright (C) 2018-2026 Intel Corporation.
// SPDX-License-Identifier: Apache-2.0
//
#include <future>
#include <iostream>
#include <memory>
#include <regex>
#include <gflags/gflags.h>
#include "parser/parser.hpp"
#include "simulation/performance_mode.hpp"
#include "simulation/reference_mode.hpp"
#include "simulation/validation_mode.hpp"
#include "simulation/accuracy_mode.hpp"
#include "simulation/workload_type.hpp"
#include "utils/error.hpp"
#include "utils/logger.hpp"
#include "version.hpp"
// ---- CLI help strings ----
// Shared between the gflags DEFINE_* registrations below and showUsage(),
// so the -h output and the gflags --help output stay in sync.
static constexpr char help_message[] = "Optional. Print the usage message.";
static constexpr char cfg_message[] = "Path to the configuration file.";
static constexpr char device_message[] =
    "Optional. Device name. If specified overwrites device specified in config file.";
static constexpr char pipeline_message[] = "Optional. Enable pipelined execution.";
static constexpr char drop_message[] = "Optional. Drop frames if they come earlier than pipeline is completed.";
static constexpr char mode_message[] = "Optional. Simulation mode: performance (default), reference, validation, accuracy.";
static constexpr char reference_device_msg[] = "Optional. Reference device for accuracy mode (default: CPU).";
static constexpr char target_device_msg[] = "Optional. Target device for accuracy mode (default: NPU).";
// NB: adjacent string literals are concatenated by the compiler.
static constexpr char niter_message[] = "Optional. Number of iterations. If specified overwrites termination criterion"
                                        " for all scenarios in configuration file.";
static constexpr char exec_time_message[] = "Optional. Time in seconds. If specified overwrites termination criterion"
                                            " for all scenarios in configuration file.";
static constexpr char inference_only_message[] =
    "Optional. Run only inference execution for every model excluding i/o data transfer."
    " Applicable only for \"performance\" mode. (default: true).";
static constexpr char exec_filter_msg[] = "Optional. Run the scenarios that match provided string pattern.";
static constexpr char version_message[] = "Optional. Print the version";
// ---- Command-line flag registrations (gflags) ----
// Each DEFINE_* creates a global FLAGS_<name> variable populated by
// gflags::ParseCommandLineNonHelpFlags() in parseCommandLine().
DEFINE_bool(h, false, help_message);                           // -h: print usage and exit
DEFINE_string(cfg, "", cfg_message);                           // -cfg: path to scenario config (required)
DEFINE_string(d, "", device_message);                          // -d: device override for non-accuracy modes
DEFINE_bool(pipeline, false, pipeline_message);                // -pipeline: pipelined vs sync compilation
DEFINE_bool(drop_frames, false, drop_message);                 // -drop_frames: drop late frames
DEFINE_string(mode, "performance", mode_message);              // -mode: simulation mode selector
DEFINE_string(reference_device, "CPU", reference_device_msg);  // accuracy mode only
DEFINE_string(target_device, "NPU", target_device_msg);        // accuracy mode only
DEFINE_uint64(niter, 0, niter_message);                        // 0 means "not specified"
DEFINE_uint64(t, 0, exec_time_message);                        // seconds; 0 means "not specified"
DEFINE_bool(inference_only, true, inference_only_message);     // performance mode only
DEFINE_string(exec_filter, ".*", exec_filter_msg);             // regex matched against scenario names
DEFINE_bool(v, false, version_message);                        // -v: print version and exit
static void showUsage() {
std::cout << "protopipe [OPTIONS]" << std::endl;
std::cout << std::endl;
std::cout << " Common options: " << std::endl;
std::cout << " -h " << help_message << std::endl;
std::cout << " -cfg <value> " << cfg_message << std::endl;
std::cout << " -pipeline " << pipeline_message << std::endl;
std::cout << " -drop_frames " << drop_message << std::endl;
std::cout << " -d <value> " << device_message << std::endl;
std::cout << " -mode <value> " << mode_message << std::endl;
std::cout << " -reference_device <value> " << reference_device_msg << std::endl;
std::cout << " -target_device <value> " << target_device_msg << std::endl;
std::cout << " -niter <value> " << niter_message << std::endl;
std::cout << " -t <value> " << exec_time_message << std::endl;
std::cout << " -inference_only " << inference_only_message << std::endl;
std::cout << " -exec_filter " << exec_filter_msg << std::endl;
std::cout << " -v " << version_message << std::endl;
std::cout << std::endl;
}
bool parseCommandLine(int* argc, char*** argv) {
gflags::ParseCommandLineNonHelpFlags(argc, argv, true);
if (FLAGS_h) {
showUsage();
return false;
}
if (FLAGS_v) {
// NB: Version is already printed at the start of main()
return false;
}
if (FLAGS_cfg.empty()) {
throw std::invalid_argument("Path to config file is required");
}
std::cout << "Parameters:" << std::endl;
std::cout << " Config file: " << FLAGS_cfg << std::endl;
std::cout << " Pipelining is enabled: " << std::boolalpha << FLAGS_pipeline << std::endl;
std::cout << " Simulation mode: " << FLAGS_mode << std::endl;
std::cout << " Inference only: " << std::boolalpha << FLAGS_inference_only << std::endl;
if (FLAGS_mode == "accuracy") {
std::cout << " Reference device: " << FLAGS_reference_device << std::endl;
std::cout << " Target device: " << FLAGS_target_device << std::endl;
} else {
std::cout << " Device: " << FLAGS_d << std::endl;
}
return true;
}
// Compiles the simulation either as a pipelined or a synchronous executable,
// depending on the -pipeline CLI flag.
static ICompiled::Ptr compileSimulation(Simulation::Ptr simulation, const bool pipelined, const bool drop_frames) {
    LOG_INFO() << "Compile simulation" << std::endl;
    return pipelined ? simulation->compilePipelined(drop_frames)
                     : simulation->compileSync(drop_frames);
}
// Collects callables and executes them all concurrently, one std::async
// task per callable, blocking until every task has completed.
class ThreadRunner {
public:
    using F = std::function<void()>;

    // Queues a callable for the next run(); the callable is moved in.
    void add(F&& func) {
        m_funcs.push_back(std::move(func));
    }

    // Launches every queued callable on its own thread and waits for all of
    // them; future::get() rethrows the first exception a task stored.
    void run() {
        std::vector<std::future<void>> pending;
        pending.reserve(m_funcs.size());
        for (auto&& task : m_funcs) {
            pending.push_back(std::async(std::launch::async, std::move(task)));
        }
        for (auto& p : pending) {
            p.get();
        }
    }

private:
    std::vector<F> m_funcs;  // moved-from after run(); runner is single-use
};
// One runnable unit of work: executes a compiled simulation under a
// termination criterion and stores the outcome (success or Error) so the
// owning thread can inspect it after ThreadRunner::run() finishes.
class Task {
public:
    Task(ICompiled::Ptr&& compiled, std::string&& name, ITermCriterion::Ptr&& criterion)
            : m_compiled(std::move(compiled)),
              m_name(std::move(name)),
              m_criterion(std::move(criterion)) {
    }

    // Runs the compiled simulation; any exception is captured into the
    // result instead of propagating, so sibling tasks keep running.
    void operator()() {
        try {
            m_result = m_compiled->run(m_criterion);
        } catch (const std::exception& e) {
            m_result = Error{e.what()};
        }
    }

    // Outcome of the last operator() invocation.
    const Result& result() const {
        return m_result;
    }

    // Stream name this task was created for.
    const std::string& name() const {
        return m_name;
    }

private:
    ICompiled::Ptr m_compiled;
    std::string m_name;
    ITermCriterion::Ptr m_criterion;
    Result m_result;
};
// Builds the Simulation implementation for the requested -mode.
// NB: consumes (moves out of) `stream` — the caller must not use the
// stream descriptor fields afterwards. Throws std::logic_error for an
// unknown mode string.
static Simulation::Ptr createSimulation(const std::string& mode, StreamDesc&& stream, const bool inference_only,
const Config& config) {
Simulation::Ptr simulation;
// NB: Common parameters for all simulations
Simulation::Config cfg{stream.name, stream.frames_interval_in_us, config.disable_high_resolution_timer,
std::move(stream.graph), std::move(stream.infer_params_map)};
if (mode == "performance") {
// Measures throughput/latency; input data only needed to feed the graph.
PerformanceSimulation::Options opts{config.initializer, std::move(stream.initializers_map),
std::move(stream.input_data_map), inference_only,
std::move(stream.target_latency)};
simulation = std::make_shared<PerformanceSimulation>(std::move(cfg), std::move(opts));
} else if (mode == "reference") {
// Produces reference outputs (written to output_data_map locations).
CalcRefSimulation::Options opts{config.initializer, std::move(stream.initializers_map),
std::move(stream.input_data_map), std::move(stream.output_data_map)};
simulation = std::make_shared<CalcRefSimulation>(std::move(cfg), std::move(opts));
} else if (mode == "validation") {
// Compares actual outputs against stored references using `metric`.
ValSimulation::Options opts{config.metric, std::move(stream.metrics_map), std::move(stream.input_data_map),
std::move(stream.output_data_map), std::move(stream.per_iter_outputs_path)};
simulation = std::make_shared<ValSimulation>(std::move(cfg), std::move(opts));
} else if (mode == "accuracy") {
// Runs on both reference and target devices; devices come straight from
// the -reference_device / -target_device CLI flags (global FLAGS_*).
AccuracySimulation::Options opts{FLAGS_reference_device, FLAGS_target_device, config.npu_compiler_type, config.initializer,
std::move(stream.initializers_map), std::move(stream.input_data_map),
std::move(stream.output_data_map), config.metric, std::move(stream.metrics_map)};
simulation = std::make_shared<AccuracySimulation>(std::move(cfg), std::move(opts));
} else {
throw std::logic_error("Unsupported simulation mode: " + mode);
}
ASSERT(simulation);
return simulation;
}
// Entry point: parses CLI options, loads the scenario config, and runs each
// matching scenario's streams concurrently. Returns EXIT_FAILURE if any
// stream of any executed scenario failed.
int main(int argc, char* argv[]) {
    // NB: Intentionally wrapped into try-catch to display exceptions occur on windows.
    try {
        std::cout << "Protopipe " << APP_VERSION << std::endl;
        if (!parseCommandLine(&argc, &argv)) {
            // Help or version was requested — nothing else to do.
            return 0;
        }
        ReplaceBy replace_by{FLAGS_d};
        auto parser = std::make_shared<ScenarioParser>(FLAGS_cfg);
        LOG_INFO() << "Parse scenarios from " << FLAGS_cfg << " config file" << std::endl;
        auto config = parser->parseScenarios(replace_by);
        LOG_INFO() << "Found " << config.scenarios.size() << " scenario(s)" << std::endl;
        // NB: Overwrite termination criteria for all scenarios if specified via CLI
        ITermCriterion::Ptr global_criterion;
        if (FLAGS_niter != 0u) {
            LOG_INFO() << "Termination criterion of " << FLAGS_niter << " iteration(s) will be used for all scenarios"
                       << std::endl;
            global_criterion = std::make_shared<Iterations>(FLAGS_niter);
        }
        if (FLAGS_t != 0u) {
            if (global_criterion) {
                // TODO: In fact, it make sense to have them both enabled.
                THROW_ERROR("-niter and -t options can't be specified together!");
            }
            LOG_INFO() << "Termination criterion of " << FLAGS_t << " second(s) will be used for all scenarios"
                       << std::endl;
            // NB: TimeOut accepts microseconds
            global_criterion = std::make_shared<TimeOut>(FLAGS_t * 1'000'000);
        }
        std::regex filter_regex{FLAGS_exec_filter};
        bool any_scenario_failed = false;
        for (auto&& scenario : config.scenarios) {
            // NB: Skip the scenarios that don't match provided filter pattern
            if (!std::regex_match(scenario.name, filter_regex)) {
                LOG_INFO() << "Skip the scenario " << scenario.name << " as it doesn't match the -exec_filter=\""
                           << FLAGS_exec_filter << "\" pattern" << std::endl;
                continue;
            }
            LOG_INFO() << "Start processing " << scenario.name << std::endl;
            ThreadRunner runner;
            std::vector<Task> tasks;
            // NB: reserve so tasks.back() references stay valid for runner.
            tasks.reserve(scenario.streams.size());
            for (auto&& stream : scenario.streams) {
                auto criterion = stream.criterion;
                auto stream_name = stream.name;
                if (global_criterion) {
                    if (criterion) {
                        LOG_INFO() << "Stream: " << stream_name
                                   << " termination criterion is overwritten by CLI parameter" << std::endl;
                    }
                    criterion = global_criterion->clone();
                }
                std::shared_ptr<WorkloadTypeInfo> workload_type;
                if (stream.workload_type.has_value()) {
                    workload_type = std::make_shared<WorkloadTypeInfo>();
                    workload_type->wl_ov = std::make_shared<cv::gapi::wip::ov::WorkloadTypeOV>();
                    workload_type->wl_onnx = std::make_shared<cv::gapi::onnx::WorkloadTypeONNX>();
                    workload_type->workload_config = stream.workload_type.value();
                    // FIX: `criterion` is null when the stream has no criterion
                    // in the config and neither -niter nor -t was given;
                    // the previous unconditional call dereferenced a null ptr.
                    if (criterion) {
                        criterion->setWorkloadTrigger(workload_type);
                    } else {
                        LOG_INFO() << "Stream: " << stream_name
                                   << " has a workload type but no termination criterion;"
                                   << " workload trigger is not attached" << std::endl;
                    }
                }
                // NB: createSimulation consumes `stream` — don't touch it below.
                auto simulation = createSimulation(FLAGS_mode, std::move(stream), FLAGS_inference_only, config);
                if (workload_type)
                    simulation->workload = workload_type;
                auto compiled = compileSimulation(simulation, FLAGS_pipeline, FLAGS_drop_frames);
                if (simulation->workload) {
                    simulation->workload->wl_onnx->set(simulation->workload->workload_config.initial_value);
                    simulation->workload->wl_ov->set(simulation->workload->workload_config.initial_value);
                    LOG_INFO() << "Setting initial value of workload type to " << simulation->workload->workload_config.initial_value << std::endl;
                }
                tasks.emplace_back(std::move(compiled), std::move(stream_name), std::move(criterion));
                runner.add(std::ref(tasks.back()));
            }
            LOG_INFO() << "Run " << tasks.size() << " stream(s) asynchronously" << std::endl;
            runner.run();
            LOG_INFO() << "Execution has finished" << std::endl;
            for (const auto& task : tasks) {
                if (!task.result()) {
                    // NB: Scenario failed if any of the streams failed
                    any_scenario_failed = true;
                }
                std::cout << "stream " << task.name() << ": " << task.result().str() << std::endl;
            }
            std::cout << "\n";
        }
        if (any_scenario_failed) {
            return EXIT_FAILURE;
        }
    } catch (const std::exception& e) {
        // Print before rethrowing so Windows users see the message even if
        // the unhandled-exception path swallows it.
        std::cout << e.what() << std::endl;
        throw;
    } catch (...) {
        std::cout << "Unknown error" << std::endl;
        throw;
    }
    return 0;
}