[onert/test] Apply strict build to onert_train #14516

Merged: 1 commit, merged on Jan 3, 2025

tests/tools/onert_train/CMakeLists.txt (1 addition, 0 deletions)
@@ -28,6 +28,7 @@ endif(HDF5_FOUND)

target_include_directories(onert_train PRIVATE src)

+target_link_libraries(onert_train nnfw_common)
target_link_libraries(onert_train nnfw_lib_tflite jsoncpp)
target_link_libraries(onert_train nnfw-dev)
target_link_libraries(onert_train arser)
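
Context for the change above: in this repository, linking nnfw_common is presumably what pulls in the project-wide strict compile options (on the order of -Wall -Wextra -Werror) as interface usage requirements, so from this commit on every warning in onert_train's sources becomes a build error. The rest of the diff is the cleanup that lets the tool compile under those flags. As a hypothetical illustration (not code from this PR), the snippet below shows the kind of signed/unsigned comparison a default build tolerates but a strict build rejects:

// Hypothetical example, not part of this PR: under -Wall -Wextra -Werror
// the first loop fails with -Werror=sign-compare because a signed index
// is compared against an unsigned size.
#include <cstddef>
#include <vector>

int countSmall(const std::vector<float> &values)
{
  int n = 0;
  for (int i = 0; i < values.size(); ++i) // int vs std::size_t: rejected by a strict build
    if (values[i] < 1.0f)
      ++n;
  return n;
}

int countSmallStrict(const std::vector<float> &values)
{
  int n = 0;
  for (std::size_t i = 0; i < values.size(); ++i) // index type matches size()
    if (values[i] < 1.0f)
      ++n;
  return n;
}
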
tests/tools/onert_train/src/args.cc (7 additions, 7 deletions)
@@ -101,7 +101,7 @@ void checkPackage(const std::string &package_filename)

// check the value is in the valid_args list and return the corresponded enum
template <typename T>
-T checkValidation(const std::string &arg_name, const std::vector<T> &valid_args, int value)
+T checkValidation(const std::string &arg_name, const std::vector<T> &valid_args, uint32_t value)
{
for (const auto arg : valid_args)
{
@@ -271,20 +271,20 @@ void Args::Parse(const int argc, char **argv)
}

_mem_poll = _arser.get<bool>("--mem_poll");
-_epoch = _arser.get<int>("--epoch");
+_epoch = _arser.get<int32_t>("--epoch");

if (_arser["--batch_size"])
-_batch_size = _arser.get<int>("--batch_size");
+_batch_size = _arser.get<int32_t>("--batch_size");
if (_arser["--learning_rate"])
_learning_rate = _arser.get<float>("--learning_rate");
if (_arser["--loss"])
-_loss_type = checkValidation("loss", valid_loss, _arser.get<int>("--loss"));
+_loss_type = checkValidation("loss", valid_loss, _arser.get<int32_t>("--loss"));
if (_arser["--loss_reduction_type"])
_loss_reduction_type = checkValidation("loss_reduction_type", valid_loss_rdt,
_arser.get<int>("--loss_reduction_type"));
if (_arser["--optimizer"])
_optimizer_type = checkValidation("optimizer", valid_optim, _arser.get<int>("--optimizer"));
-_metric_type = _arser.get<int>("--metric");
+_metric_type = _arser.get<int32_t>("--metric");

_validation_split = _arser.get<float>("--validation_split");
if (_validation_split < 0.f || _validation_split > 1.f)
@@ -293,7 +293,7 @@ void Args::Parse(const int argc, char **argv)
exit(1);
}

-_verbose_level = _arser.get<int>("--verbose_level");
+_verbose_level = _arser.get<int32_t>("--verbose_level");

if (_arser["--output_sizes"])
{
@@ -322,7 +322,7 @@ void Args::Parse(const int argc, char **argv)
}

if (_arser["--num_of_trainable_ops"])
-_num_of_trainable_ops = _arser.get<int>("--num_of_trainable_ops");
+_num_of_trainable_ops = _arser.get<int32_t>("--num_of_trainable_ops");
}
catch (const std::bad_cast &e)
{
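
For orientation, checkValidation (whose value parameter changes above from int to uint32_t) maps a raw integer option onto one of the allowed enum values. Only its signature and the opening of its loop are visible in this hunk; the sketch below is a hedged reconstruction of how such a helper typically looks, not the file's actual body:

// Hedged sketch of a checkValidation-style helper; the error handling and
// the cast are assumptions, only the signature matches the diff above.
#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

template <typename T>
T checkValidation(const std::string &arg_name, const std::vector<T> &valid_args, uint32_t value)
{
  for (const auto arg : valid_args)
  {
    if (static_cast<uint32_t>(arg) == value)
      return arg;
  }
  throw std::runtime_error("invalid " + arg_name + " value: " + std::to_string(value));
}

// Presumed call site, mirroring Args::Parse above:
//   _loss_type = checkValidation("loss", valid_loss, _arser.get<int32_t>("--loss"));
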
tests/tools/onert_train/src/args.h (12 additions, 12 deletions)
@@ -53,23 +53,23 @@ class Args
const std::string &getExportCircleFilename(void) const { return _export_circle_filename; }
const std::string &getExportCirclePlusFilename(void) const { return _export_circleplus_filename; }
const std::string &getExportCheckpointFilename(void) const { return _export_checkpoint_filename; }
-const bool useSingleModel(void) const { return _use_single_model; }
+bool useSingleModel(void) const { return _use_single_model; }
const std::string &getLoadRawInputFilename(void) const { return _load_raw_input_filename; }
const std::string &getLoadRawExpectedFilename(void) const { return _load_raw_expected_filename; }
-const bool getMemoryPoll(void) const { return _mem_poll; }
-const int getEpoch(void) const { return _epoch; }
-const std::optional<int> getBatchSize(void) const { return _batch_size; }
+bool getMemoryPoll(void) const { return _mem_poll; }
+int32_t getEpoch(void) const { return _epoch; }
+const std::optional<int32_t> getBatchSize(void) const { return _batch_size; }
const std::optional<float> getLearningRate(void) const { return _learning_rate; }
const std::optional<NNFW_TRAIN_LOSS> getLossType(void) const { return _loss_type; }
const std::optional<NNFW_TRAIN_LOSS_REDUCTION> getLossReductionType(void) const
{
return _loss_reduction_type;
}
const std::optional<NNFW_TRAIN_OPTIMIZER> getOptimizerType(void) const { return _optimizer_type; }
-const int getMetricType(void) const { return _metric_type; }
-const float getValidationSplit(void) const { return _validation_split; }
-const bool printVersion(void) const { return _print_version; }
-const int getVerboseLevel(void) const { return _verbose_level; }
+int32_t getMetricType(void) const { return _metric_type; }
+float getValidationSplit(void) const { return _validation_split; }
+bool printVersion(void) const { return _print_version; }
+int32_t getVerboseLevel(void) const { return _verbose_level; }
std::unordered_map<uint32_t, uint32_t> getOutputSizes(void) const { return _output_sizes; }
uint32_t num_of_trainable_ops(void) const { return _num_of_trainable_ops; }

@@ -109,16 +109,16 @@ class Args
std::string _load_raw_input_filename;
std::string _load_raw_expected_filename;
bool _mem_poll;
-int _epoch;
-std::optional<int> _batch_size;
+int32_t _epoch;
+std::optional<int32_t> _batch_size;
std::optional<float> _learning_rate;
std::optional<NNFW_TRAIN_LOSS> _loss_type;
std::optional<NNFW_TRAIN_LOSS_REDUCTION> _loss_reduction_type;
std::optional<NNFW_TRAIN_OPTIMIZER> _optimizer_type;
-int _metric_type;
+int32_t _metric_type;
float _validation_split;
bool _print_version = false;
-int _verbose_level;
+int32_t _verbose_level;
std::unordered_map<uint32_t, uint32_t> _output_sizes;
int32_t _num_of_trainable_ops;
};
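
Two things happen throughout args.h: plain int members and getters become int32_t for an explicit width, and the leading const is dropped from by-value return types. The latter is what the strict build cares about: a const qualifier on a value returned by copy has no effect and is presumably what -Wignored-qualifiers (enabled by -Wextra) reports here. A small, self-contained illustration with made-up names:

// Illustration only (hypothetical struct): 'const' on a by-value return
// type is ignored by the language and warned about under -Wextra
// (-Wignored-qualifiers); removing it changes nothing for callers.
#include <cstdint>

struct Options
{
  const int32_t verboseLevel() const { return _verbose_level; } // warns under -Wextra
  int32_t metricType() const { return _metric_type; }           // clean

  int32_t _verbose_level = 0;
  int32_t _metric_type = 0;
};
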
tests/tools/onert_train/src/formatter.h (1 addition, 1 deletion)
@@ -34,7 +34,7 @@ class Formatter
Formatter(nnfw_session *sess) : session_(sess) {}
virtual void loadInputs(const std::string &filename, std::vector<Allocation> &inputs) = 0;
virtual void dumpOutputs(const std::string &filename, std::vector<Allocation> &outputs) = 0;
-virtual std::vector<TensorShape> readTensorShapes(const std::string &filename)
+virtual std::vector<TensorShape> readTensorShapes(const std::string & /* filename */)
{
return std::vector<TensorShape>();
};
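
The formatter.h change addresses a different warning: the default readTensorShapes implementation never touches its argument, and under -Wextra a named-but-unused parameter trips -Wunused-parameter. Commenting out the name keeps the signature self-documenting for overriders while leaving nothing unused. A minimal stand-alone version of the pattern, with a hypothetical class name:

#include <string>
#include <vector>

struct Reader
{
  // The base-class default ignores the argument; the commented-out name
  // documents it without triggering -Wunused-parameter.
  virtual std::vector<int> readShapes(const std::string & /* filename */) { return {}; }
  virtual ~Reader() = default;
};

An attribute such as [[maybe_unused]] on the parameter would work as well; the PR keeps the commented-name style.
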
tests/tools/onert_train/src/h5formatter.cc (1 addition, 1 deletion)
@@ -180,7 +180,7 @@ void H5Formatter::dumpOutputs(const std::string &filename, std::vector<Allocatio
nnfw_tensorinfo ti;
NNPR_ENSURE_STATUS(nnfw_output_tensorinfo(session_, i, &ti));
std::vector<hsize_t> dims(ti.rank);
-for (uint32_t j = 0; j < ti.rank; ++j)
+for (int32_t j = 0; j < ti.rank; ++j)
{
if (ti.dims[j] >= 0)
dims[j] = static_cast<hsize_t>(ti.dims[j]);
tests/tools/onert_train/src/measure.h (4 additions, 4 deletions)
@@ -131,7 +131,7 @@ class Measure
}
}

-void set(const int epoch, const int step)
+void set(const uint32_t epoch, const int32_t step)
{
_step_results.clear();
_step_results.resize(epoch);
@@ -161,7 +161,7 @@
}
}

-void run(const int epoch, const int step, const std::function<void()> &func)
+void run(const uint32_t epoch, const uint32_t step, const std::function<void()> &func)
{
if (_step_results.empty() || _step_results.size() <= epoch ||
_step_results[epoch].size() <= step)
@@ -184,7 +184,7 @@
return sum;
}

-double timeMicros(const int epoch, const AggregateType aggType)
+double timeMicros(const uint32_t epoch, const AggregateType aggType)
{
if (_step_results.empty() || _step_results.size() <= epoch)
{
@@ -218,7 +218,7 @@
<< _phase_results[type].time / 1e3 << " ms" << std::endl;
if (i == PhaseType::EXECUTE)
{
-for (int j = 0; j < _step_results.size(); ++j)
+for (uint32_t j = 0; j < _step_results.size(); ++j)
{
std::cout << "- "
<< "Epoch " << j + 1 << std::setw(12) << std::right << " takes "
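
Read together, the measure.h hunks describe a per-epoch, per-step table of timings: set() sizes the table, run() times a callable into one cell, and timeMicros() aggregates an epoch. The type changes make the indices unsigned so the bounds checks against .size() no longer mix signedness. The sketch below is an assumed, stripped-down shape of that structure, not the real Measure class:

// Assumed, simplified shape: epochs are rows, steps are columns, and
// unsigned indices compare cleanly against the vectors' size().
#include <chrono>
#include <cstdint>
#include <functional>
#include <stdexcept>
#include <vector>

class StepTimer
{
public:
  void set(const uint32_t epochs, const uint32_t steps)
  {
    _results.assign(epochs, std::vector<double>(steps, 0.0));
  }

  void run(const uint32_t epoch, const uint32_t step, const std::function<void()> &func)
  {
    if (_results.size() <= epoch || _results[epoch].size() <= step)
      throw std::runtime_error("timer table was not sized for this epoch/step");

    const auto begin = std::chrono::steady_clock::now();
    func();
    const auto end = std::chrono::steady_clock::now();
    _results[epoch][step] =
      std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count();
  }

  double timeMicros(const uint32_t epoch) const
  {
    double sum = 0.0;
    for (const auto t : _results[epoch])
      sum += t;
    return sum;
  }

private:
  std::vector<std::vector<double>> _results;
};
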
tests/tools/onert_train/src/metrics.cc (1 addition, 1 deletion)
@@ -35,7 +35,7 @@ float Metrics::categoricalAccuracy(const T *output, const T *expected, uint32_t
uint64_t size)
{
int correct = 0;
-for (int b = 0; b < batch; ++b)
+for (uint32_t b = 0; b < batch; ++b)
{
int begin_offset = b * size;
int end_offset = begin_offset + size;
tests/tools/onert_train/src/nnfw_util.cc (1 addition, 1 deletion)
@@ -27,7 +27,7 @@ namespace onert_train
uint64_t num_elems(const nnfw_tensorinfo *ti)
{
uint64_t n = 1;
-for (uint32_t i = 0; i < ti->rank; ++i)
+for (int32_t i = 0; i < ti->rank; ++i)
{
assert(ti->dims[i] >= 0);
n *= ti->dims[i];
tests/tools/onert_train/src/onert_train.cc (6 additions, 7 deletions)
@@ -56,7 +56,6 @@ int main(const int argc, char **argv)
}

// TODO Apply verbose level to phases
-const int verbose = args.getVerboseLevel();

// prepare measure tool
Measure measure(args.getMemoryPoll());
@@ -240,10 +239,10 @@ int main(const int argc, char **argv)
std::vector<float> losses(num_expecteds);
std::vector<float> metrics(num_expecteds);
measure.run(PhaseType::EXECUTE, [&]() {
-const int num_step = tdata_length / tri.batch_size;
-const int num_epoch = args.getEpoch();
+const auto num_step = tdata_length / tri.batch_size;
+const auto num_epoch = args.getEpoch();
measure.set(num_epoch, num_step);
-for (uint32_t epoch = 0; epoch < num_epoch; ++epoch)
+for (int32_t epoch = 0; epoch < num_epoch; ++epoch)
{
//
// TRAINING
@@ -276,7 +275,7 @@ int main(const int argc, char **argv)

// store loss
Metrics metric(output_data, expected_data, expected_infos);
-for (int32_t i = 0; i < num_expecteds; ++i)
+for (uint32_t i = 0; i < num_expecteds; ++i)
{
float temp = 0.f;
NNPR_ENSURE_STATUS(nnfw_train_get_loss(session, i, &temp));
@@ -318,7 +317,7 @@
{
std::fill(losses.begin(), losses.end(), 0);
std::fill(metrics.begin(), metrics.end(), 0);
-const int num_valid_step = vdata_length / tri.batch_size;
+const auto num_valid_step = vdata_length / tri.batch_size;
for (uint32_t n = 0; n < num_valid_step; ++n)
{
// get batchsize validation data
@@ -344,7 +343,7 @@

// get validation loss and accuracy
Metrics metric(output_data, expected_data, expected_infos);
-for (int32_t i = 0; i < num_expecteds; ++i)
+for (uint32_t i = 0; i < num_expecteds; ++i)
{
float temp = 0.f;
NNPR_ENSURE_STATUS(nnfw_train_get_loss(session, i, &temp));
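
The onert_train.cc changes are the same cleanup applied at the call sites: the unused verbose local is deleted (presumably because it would now fail -Werror=unused-variable; the TODO above it stays), the derived step counts use const auto so the divisions keep their natural integer type, and each loop counter is matched to the type of its bound instead of being cast. A hypothetical reduction of that loop-index rule:

// Hypothetical illustration: pick the counter type from the bound it is
// compared against, so -Wsign-compare has nothing to flag.
#include <cstdint>

void runEpochs(const int32_t num_epoch, const uint32_t num_expecteds)
{
  for (int32_t epoch = 0; epoch < num_epoch; ++epoch) // signed bound, signed index
  {
    for (uint32_t i = 0; i < num_expecteds; ++i) // unsigned bound, unsigned index
    {
      // ... run one training step and collect the loss for output i ...
    }
  }
}
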
tests/tools/onert_train/src/rawformatter.cc (7 additions, 6 deletions)
@@ -19,12 +19,13 @@
#include "nnfw_util.h"

#include <iostream>
+#include <filesystem>
#include <fstream>
#include <stdexcept>

namespace onert_train
{
-void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocation> &inputs)
+void RawFormatter::loadInputs(const std::string &prefix, std::vector<Allocation> &inputs)
{
uint32_t num_inputs;
NNPR_ENSURE_STATUS(nnfw_input_size(session_, &num_inputs));
@@ -44,18 +45,18 @@ void RawFormatter::loadInputs(const std::string &filename, std::vector<Allocatio
NNPR_ENSURE_STATUS(nnfw_input_tensorinfo(session_, i, &ti));

// allocate memory for data
-auto bufsz = bufsize_for(&ti);
-inputs[i].alloc(bufsz);
+const auto bufsz = bufsize_for(&ti);
+const auto filename = prefix + "." + std::to_string(i);
+auto filesz = std::filesystem::file_size(filename);

-std::ifstream file(filename + "." + std::to_string(i), std::ios::ate | std::ios::binary);
-auto filesz = file.tellg();
if (bufsz != filesz)
{
throw std::runtime_error("Input " + std::to_string(i) +
" size does not match: " + std::to_string(bufsz) +
" expected, but " + std::to_string(filesz) + " provided.");
}
-file.seekg(0, std::ios::beg);
+std::ifstream file(filename, std::ios::in | std::ios::binary);
+inputs[i].alloc(bufsz);
file.read(reinterpret_cast<char *>(inputs[i].data()), filesz);
file.close();

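
The rawformatter.cc rewrite renames the parameter to prefix (the per-input file is prefix + "." + index), queries the size with std::filesystem::file_size instead of opening the stream with std::ios::ate and calling tellg(), and only allocates and opens the file once the size checks out. A standalone sketch of that read pattern, using an illustrative helper name rather than the PR's code:

// Hypothetical helper mirroring the updated loadInputs flow: check the
// on-disk size first, then allocate and read in binary mode.
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<char> readExact(const std::string &filename, std::uintmax_t expected_size)
{
  const auto file_size = std::filesystem::file_size(filename); // throws filesystem_error if missing
  if (file_size != expected_size)
  {
    throw std::runtime_error(filename + " size does not match: " +
                             std::to_string(expected_size) + " expected, but " +
                             std::to_string(file_size) + " provided.");
  }

  std::vector<char> buffer(file_size);
  std::ifstream file(filename, std::ios::in | std::ios::binary);
  file.read(buffer.data(), static_cast<std::streamsize>(file_size));
  return buffer;
}

Checking the size before allocating means a mismatched file is rejected without touching the input buffer, and std::filesystem::file_size reports a missing file as an exception rather than a silent zero-byte read.
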