[Windows] Compile nntrainer on windows
Signed-off-by: Grzegorz Kisala <[email protected]>
gkisalapl committed Jan 24, 2025
1 parent 100f4ed commit e000269
Showing 17 changed files with 32 additions and 24 deletions.
nntrainer/compiler/remap_realizer.h (1 addition, 0 deletions)
@@ -14,6 +14,7 @@
 
 #include <functional>
 #include <memory>
+#include <string>
 #include <vector>
 
 #include <realizer.h>

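Why this matters for the Windows build: MSVC's standard library does not pull <string> in transitively through headers such as <functional> or <memory> the way libstdc++ often does, so any header that names std::string has to include it itself. A minimal, hypothetical sketch of the failure mode (not nntrainer code):

// remap_example.h -- hypothetical header that names std::string
#include <functional>
#include <string> // without this line the alias below may fail under MSVC even when GCC builds accept it
using RemapFn = std::function<void(std::string &)>;
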
nntrainer/dataset/databuffer.cpp (2 additions, 3 deletions)
@@ -33,6 +33,7 @@
 #include <nntrainer_error.h>
 #include <nntrainer_log.h>
 #include <node_exporter.h>
+#include <numeric>
 #include <sstream>
 #include <stdexcept>
 #include <stdio.h>
@@ -61,9 +62,7 @@ class PropsBufferSize : public nntrainer::PositiveIntegerProperty {
 constexpr char USER_DATA[] = "user_data";
 
 DataBuffer::DataBuffer(std::unique_ptr<DataProducer> &&producer_) :
-  producer(std::move(producer_)),
-  db_props(new Props()),
-  user_data(nullptr) {
+  producer(std::move(producer_)), db_props(new Props()), user_data(nullptr) {
   rng.seed(0);
 }

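The same explicit-include rule applies to <numeric>: std::accumulate and std::iota live there, and MSVC will not provide them via other headers. A minimal sketch, assuming a sum over per-shard sample counts (illustrative only, not the actual databuffer logic):

#include <numeric>
#include <vector>

// Sum of sample counts across shards; relies on std::accumulate from <numeric>.
static size_t total_samples(const std::vector<size_t> &shard_sizes) {
  return std::accumulate(shard_sizes.begin(), shard_sizes.end(), size_t{0});
}
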
nntrainer/dataset/dir_data_producers.cpp (1 addition, 1 deletion)
@@ -104,7 +104,7 @@ DirDataProducer::finalize(const std::vector<TensorDim> &input_dims,
   const auto &dir_path = std::get<props::DirPath>(*dir_data_props).get();
 
   for (const auto &entry : std::filesystem::directory_iterator(dir_path))
-    class_names.push_back(entry.path());
+    class_names.push_back(entry.path().string());
 
   num_class = class_names.size();
 

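The added .string() is needed because std::filesystem::path::value_type is wchar_t on Windows: there a path only converts implicitly to std::wstring, so pushing a bare path into a std::vector<std::string> fails under MSVC even though it happens to compile on POSIX. path::string() performs the conversion explicitly on every platform. A small self-contained sketch of the pattern:

#include <filesystem>
#include <string>
#include <vector>

// Collect entry names as narrow strings; .string() is the portable spelling.
std::vector<std::string> list_entries(const std::filesystem::path &dir) {
  std::vector<std::string> names;
  for (const auto &entry : std::filesystem::directory_iterator(dir))
    names.push_back(entry.path().string());
  return names;
}
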
nntrainer/layers/acti_func.h (3 additions, 0 deletions)
@@ -19,6 +19,9 @@
 #include <blas_interface.h>
 #include <common_properties.h>
 
+#define _USE_MATH_DEFINES
+#include <math.h>
+
 namespace nntrainer {
 
 class Tensor;

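M_PI and the other M_* constants are not part of standard C++; MSVC's <math.h> only defines them when _USE_MATH_DEFINES is defined before the header is first included in the translation unit, which is why the macro sits directly above the include. A minimal illustrative sketch:

#define _USE_MATH_DEFINES // must come before the first inclusion of <math.h>
#include <math.h>

// Without the define, M_PI is an undeclared identifier when compiling with MSVC.
static inline double circle_area(double r) { return M_PI * r * r; }
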
nntrainer/layers/dropout.h (2 additions, 2 deletions)
@@ -29,8 +29,8 @@ class DropOutLayer : public Layer {
   /**
    * @brief Constructor of DropOut Layer
    */
-  DropOutLayer(float dropout = 0.0) :
-    Layer(), dropout_rate(props::DropOutRate(dropout)), epsilon(1e-3) {}
+  DropOutLayer(float dropout = 0.0f) :
+    Layer(), dropout_rate(props::DropOutRate(dropout)), epsilon(1e-3f) {}
 
   /**
    * @brief Destructor of DropOut Layer

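The added f suffixes address MSVC truncation warnings: 1e-3 is a double literal, so initializing a float from it narrows from double to float (MSVC reports this as warning C4305). The same one-character change recurs below in rnn.cpp, rnncell.cpp, tensor.h, tensor_base.h and weight.h. The pattern in isolation:

float eps_warns = 1e-3;  // double literal narrowed to float: warning under MSVC
float eps_clean = 1e-3f; // float literal: no narrowing, no warning
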
nntrainer/layers/rnn.cpp (1 addition, 1 deletion)
@@ -44,7 +44,7 @@ RNNLayer::RNNLayer() :
     props::Unit(), props::HiddenStateActivation() = ActivationType::ACT_TANH,
     props::ReturnSequences(), props::DropOutRate(), props::IntegrateBias()),
   acti_func(ActivationType::ACT_NONE, true),
-  epsilon(1e-3) {
+  epsilon(1e-3f) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
 }
 

nntrainer/layers/rnncell.cpp (1 addition, 1 deletion)
@@ -45,7 +45,7 @@ RNNCellLayer::RNNCellLayer() :
     props::HiddenStateActivation() = ActivationType::ACT_TANH,
     props::DropOutRate()),
   acti_func(ActivationType::ACT_NONE, true),
-  epsilon(1e-3) {
+  epsilon(1e-3f) {
   wt_idx.fill(std::numeric_limits<unsigned>::max());
 }
 

nntrainer/models/dynamic_training_optimization.cpp (1 addition, 0 deletions)
@@ -11,6 +11,7 @@
  *
  */
 
+#include <numeric>
 #include <random>
 #include <vector>
 

nntrainer/nntrainer_log.h (8 additions, 8 deletions)
@@ -56,27 +56,27 @@
 #include <nntrainer_logger.h>
 
 #if !defined(ml_logi)
-#define ml_logi(format, args...) \
+#define ml_logi(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_INFO, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_logw)
-#define ml_logw(format, args...) \
+#define ml_logw(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_WARN, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_loge)
-#define ml_loge(format, args...) \
+#define ml_loge(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_ERROR, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #if !defined(ml_logd)
-#define ml_logd(format, args...) \
+#define ml_logd(format, ...) \
   __nntrainer_log_print(NNTRAINER_LOG_DEBUG, "(%s:%s:%d) " format, __FILE__, \
-                        __func__, __LINE__, ##args)
+                        __func__, __LINE__, ##__VA_ARGS__)
 #endif
 
 #endif

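The old macros used GCC's named variadic parameters (args... with ##args), which is a GNU extension MSVC does not accept; the rewrite uses the standard C99/C++11 form with ... and __VA_ARGS__. A self-contained sketch of the portable pattern, with plain printf standing in for __nntrainer_log_print:

#include <cstdio>

// Standard variadic macro, accepted by GCC, Clang and MSVC.
// Note: ##__VA_ARGS__ (swallowing the comma when no extra arguments are passed)
// is itself a widespread extension; strictly conforming C++20 code would use __VA_OPT__.
#define LOG_INFO(format, ...) \
  std::printf("(%s:%d) " format "\n", __FILE__, __LINE__, ##__VA_ARGS__)

int main() {
  LOG_INFO("starting up");
  LOG_INFO("loaded %d layers", 42);
  return 0;
}
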
nntrainer/nntrainer_logger.cpp (3 additions, 2 deletions)
@@ -30,7 +30,6 @@
 #include <sstream>
 #include <stdarg.h>
 #include <stdexcept>
-#include <unistd.h>
 #include <util_func.h>
 
 namespace nntrainer {
@@ -78,7 +77,9 @@ Logger::Logger() : ts_type(NNTRAINER_LOG_TIMESTAMP_SEC) {
      << std::setw(2) << now.tm_sec << ".out";
   outputstream.open(ss.str(), std::ios_base::app);
   if (!outputstream.good()) {
-    char buf[256] = {0,};
+    char buf[256] = {
+      0,
+    };
     std::string cur_path = std::string(buf);
     std::string err_msg =
       "Unable to initialize the Logger on path(" + cur_path + ")";

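<unistd.h> is a POSIX-only header with no MSVC counterpart, so it has to go; removing it here indicates the logger no longer needs anything from it (task_executor.h below gets the same treatment). When a POSIX call is still genuinely required, the usual pattern is a guarded include -- a hedged sketch of that pattern, not something this commit does:

#ifdef _WIN32
#include <direct.h> // _getcwd lives here on Windows
#define getcwd _getcwd
#else
#include <unistd.h> // getcwd on POSIX
#endif
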
nntrainer/optimizers/lr_scheduler_cosine.cpp (2 additions, 1 deletion)
@@ -11,7 +11,8 @@
  *
  */
 
-#include <cmath>
+#define _USE_MATH_DEFINES
+#include <math.h>
 
 #include <common_properties.h>
 #include <lr_scheduler_cosine.h>

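Same _USE_MATH_DEFINES pattern as in acti_func.h above, here so that M_PI is available to the cosine schedule; the switch from <cmath> to <math.h> keeps the define immediately next to the header it affects. For reference, cosine annealing in its generic textbook form (not necessarily nntrainer's exact property names) looks like:

#define _USE_MATH_DEFINES
#include <math.h>

// Generic cosine annealing from lr_max down to lr_min over `period` iterations.
double cosine_annealed_lr(double lr_max, double lr_min, double period, double iteration) {
  return lr_min + 0.5 * (lr_max - lr_min) * (1.0 + cos(M_PI * iteration / period));
}
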
nntrainer/tensor/float_tensor.cpp (1 addition, 0 deletions)
@@ -11,6 +11,7 @@
 
 #include <iomanip>
 #include <iostream>
+#include <numeric>
 
 #include <blas_interface.h>
 #include <float_tensor.h>

nntrainer/tensor/task_executor.h (0 additions, 1 deletion)
@@ -22,7 +22,6 @@
 #include <memory>
 #include <mutex>
 #include <thread>
-#include <unistd.h>
 
 #include <task.h>
 

nntrainer/tensor/tensor.cpp (2 additions, 0 deletions)
@@ -9,6 +9,8 @@
  * @bug No known bugs except for NYI items
  */
 
+#include <numeric>
+
 #include <char_tensor.h>
 #include <float_tensor.h>
 #include <lazy_tensor.h>

nntrainer/tensor/tensor.h (1 addition, 1 deletion)
@@ -1745,7 +1745,7 @@ class Tensor {
    */
   bool isValid() const { return itensor->isValid(); };
 
-  static constexpr float epsilon = 1e-5;
+  static constexpr float epsilon = 1e-5f;
 
 private:
   std::shared_ptr<TensorBase> itensor;

nntrainer/tensor/tensor_base.h (1 addition, 1 deletion)
@@ -704,7 +704,7 @@ class TensorBase {
    */
   virtual bool isValid() const = 0;
 
-  static constexpr float epsilon = 1e-5;
+  static constexpr float epsilon = 1e-5f;
 
 protected:
   TensorDim dim;

nntrainer/tensor/weight.h (2 additions, 2 deletions)
@@ -364,9 +364,9 @@ class Weight : public Var_Grad {
   const float getLossScale() { return loss_scale; };
 
 private:
-  static constexpr float epsilon = 1e-6; /**< epsilon for zero comparison */
+  static constexpr float epsilon = 1e-6f; /**< epsilon for zero comparison */
   static constexpr float epsilon_decay =
-    1e-8; /**< epsilon for zero comparison */
+    1e-8f; /**< epsilon for zero comparison */
 
   WeightRegularizer regularizer; /**< regularizer for this variable */
   float regularizer_constant; /**< constant factor for regularization */

