Commit

fixes
Alexandr-Solovev committed Nov 19, 2024
1 parent cc85e37 commit 852669f
Showing 10 changed files with 35 additions and 36 deletions.
@@ -91,8 +91,8 @@ std::int32_t most_frequent_element(const std::atomic<std::int32_t> *components,
     std::int32_t *rnd_vertex_ids = allocate(vertex_allocator, samples_count);
 
     dal::backend::primitives::daal_engine eng;
-    dal::backend::primitives::daal_rng<std::int32_t> rn_gen;
-    rn_gen.uniform(samples_count, rnd_vertex_ids, eng, 0, vertex_count);
+    dal::backend::primitives::rng<std::int32_t> rn_gen;
+    rn_gen.uniform_cpu(samples_count, rnd_vertex_ids, eng, 0, vertex_count);
 
     std::int32_t *root_sample_counts = allocate(vertex_allocator, vertex_count);
@@ -396,14 +396,14 @@ sycl::event train_kernel_hist_impl<Float, Bin, Index, Task>::gen_initial_tree_or
     Index* const node_list_ptr = node_list_host.get_mutable_data();
 
     for (Index node_idx = 0; node_idx < node_count; ++node_idx) {
-        pr::daal_rng<Index> rn_gen;
+        pr::rng<Index> rn_gen;
         Index* gen_row_idx_global_ptr =
             selected_row_global_ptr + ctx.selected_row_total_count_ * node_idx;
-        rn_gen.uniform(ctx.selected_row_total_count_,
-                       gen_row_idx_global_ptr,
-                       rng_engine_list[engine_offset + node_idx],
-                       0,
-                       ctx.row_total_count_);
+        rn_gen.uniform_cpu(ctx.selected_row_total_count_,
+                           gen_row_idx_global_ptr,
+                           rng_engine_list[engine_offset + node_idx],
+                           0,
+                           ctx.row_total_count_);
 
         if (ctx.distr_mode_) {
             Index* node_ptr = node_list_ptr + node_idx * impl_const_t::node_prop_count_;
@@ -483,7 +483,7 @@ train_kernel_hist_impl<Float, Bin, Index, Task>::gen_feature_list(
 
     auto node_vs_tree_map_list_host = node_vs_tree_map_list.to_host(queue_);
 
-    pr::daal_rng<Index> rn_gen;
+    pr::rng<Index> rn_gen;
     auto tree_map_ptr = node_vs_tree_map_list_host.get_mutable_data();
     if (ctx.selected_ftr_count_ != ctx.column_count_) {
         for (Index node = 0; node < node_count; ++node) {
@@ -524,7 +524,7 @@ train_kernel_hist_impl<Float, Bin, Index, Task>::gen_random_thresholds(
 
     auto node_vs_tree_map_list_host = node_vs_tree_map.to_host(queue_);
 
-    pr::daal_rng<Float> rn_gen;
+    pr::rng<Float> rn_gen;
     auto tree_map_ptr = node_vs_tree_map_list_host.get_mutable_data();
 
     // Create arrays for random generated bins
@@ -537,11 +537,11 @@
 
     // Generate random bins for selected features
     for (Index node = 0; node < node_count; ++node) {
-        rn_gen.uniform(ctx.selected_ftr_count_,
-                       random_bins_host_ptr + node * ctx.selected_ftr_count_,
-                       rng_engine_list[tree_map_ptr[node]],
-                       0.0f,
-                       1.0f);
+        rn_gen.uniform_cpu(ctx.selected_ftr_count_,
+                           random_bins_host_ptr + node * ctx.selected_ftr_count_,
+                           rng_engine_list[tree_map_ptr[node]],
+                           0.0f,
+                           1.0f);
     }
     auto event_rnd_generate =
         random_bins_com.assign_from_host(queue_, random_bins_host_ptr, random_bins_com.get_count());
@@ -1660,12 +1660,12 @@ sycl::event train_kernel_hist_impl<Float, Bin, Index, Task>::compute_results(
 
     const Float div1 = Float(1) / Float(built_tree_count + tree_idx_in_block + 1);
 
-    pr::daal_rng<Index> rn_gen;
+    pr::rng<Index> rn_gen;
 
     for (Index column_idx = 0; column_idx < ctx.column_count_; ++column_idx) {
-        rn_gen.shuffle(oob_row_count,
-                       permutation_ptr,
-                       engine_arr[built_tree_count + tree_idx_in_block]);
+        rn_gen.shuffle_cpu(oob_row_count,
+                           permutation_ptr,
+                           engine_arr[built_tree_count + tree_idx_in_block]);
         const Float oob_err_perm = compute_oob_error_perm(ctx,
                                                           model_manager,
                                                           data_host,
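The loop above reshuffles the out-of-bag row permutation once per feature column with shuffle_cpu and then re-scores the tree through compute_oob_error_perm, which is the usual permutation-importance recipe. A standalone stand-in for that permutation step, using only the C++ standard library rather than the rng primitive:

```cpp
// Illustration of the per-column permutation step; std::mt19937 stands in for
// the per-tree entry of engine_arr, and oob_row_count is a made-up size.
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <random>
#include <vector>

int main() {
    const std::int32_t oob_row_count = 8;
    std::vector<std::int32_t> permutation(oob_row_count);
    std::iota(permutation.begin(), permutation.end(), 0); // identity order

    std::mt19937 engine(42);
    std::shuffle(permutation.begin(), permutation.end(), engine);

    // permutation now holds a random order of the OOB rows; the real code
    // passes it on to compute_oob_error_perm to score one feature column.
    return 0;
}
```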
2 changes: 1 addition & 1 deletion cpp/oneapi/dal/algo/louvain/backend/cpu/louvain_data.hpp
@@ -124,7 +124,7 @@ struct louvain_data {
     value_type m;
 
     daal_engine<engine_list::mt2203> eng;
-    daal_rng<std::int32_t> rn_gen;
+    rng<std::int32_t> rn_gen;
 
     const std::int64_t vertex_count;
     const std::int64_t edge_count;
@@ -206,7 +206,7 @@ inline Float move_nodes(const dal::preview::detail::topology<IndexType>& t,
         ld.random_order[index] = index;
     }
     // random shuffle
-    ld.rn_gen.uniform(t._vertex_count, ld.index, ld.eng, 0, t._vertex_count);
+    ld.rn_gen.uniform_cpu(t._vertex_count, ld.index, ld.eng, 0, t._vertex_count);
     for (std::int64_t index = 0; index < t._vertex_count; ++index) {
         std::swap(ld.random_order[index], ld.random_order[ld.index[index]]);
     }
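The shuffle in move_nodes is built from the uniform generator itself: uniform_cpu draws one index per position into ld.index, and the follow-up loop swaps each position with its drawn index. A standalone stand-in with <random>, illustrative only:

```cpp
// Mirrors the uniform_cpu + swap idiom above with standard-library types;
// vertex_count and the seed are arbitrary.
#include <cstdint>
#include <numeric>
#include <random>
#include <utility>
#include <vector>

int main() {
    const std::int64_t vertex_count = 16;
    std::vector<std::int64_t> random_order(vertex_count);
    std::iota(random_order.begin(), random_order.end(), 0);

    std::mt19937_64 eng(2024);
    std::uniform_int_distribution<std::int64_t> dist(0, vertex_count - 1);

    // one uniform draw per position, then swap with the drawn index
    for (std::int64_t index = 0; index < vertex_count; ++index) {
        std::swap(random_order[index], random_order[dist(eng)]);
    }
    return 0;
}
```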
@@ -572,13 +572,13 @@ class logloss_test : public te::float_algo_fixture<std::tuple_element_t<0, Param
         const std::int64_t p = hessian_host.get_dimension(0) - 1;
         const std::int64_t dim = fit_intercept ? p + 1 : p;
 
-        primitives::daal_rng<float_t> rn_gen;
+        primitives::rng<float_t> rn_gen;
         auto vec_host =
             ndarray<float_t, 1>::empty(this->get_queue(), { dim }, sycl::usm::alloc::host);
 
         for (std::int32_t ij = 0; ij < num_checks; ++ij) {
             primitives::daal_engine eng(2007 + dim * num_checks + ij);
-            rn_gen.uniform(dim, vec_host.get_mutable_data(), eng, -1.0, 1.0);
+            rn_gen.uniform_cpu(dim, vec_host.get_mutable_data(), eng, -1.0, 1.0);
             auto vec_gpu = vec_host.to_device(this->get_queue());
             auto out_vector =
                 ndarray<float_t, 1>::empty(this->get_queue(), { dim }, sycl::usm::alloc::device);
@@ -100,7 +100,7 @@ class logloss_spmd_test : public logloss_test<Param> {
         std::int64_t num_checks = 5;
 
         std::vector<ndarray<float_t, 1>> vecs_host(num_checks), vecs_gpu(num_checks);
-        daal_rng<float_t> rn_gen;
+        rng<float_t> rn_gen;
         for (std::int64_t ij = 0; ij < num_checks; ++ij) {
             daal_engine eng(2007 + dim * num_checks + ij);
             vecs_host[ij] =
@@ -43,9 +43,9 @@ class cg_solver_test : public te::float_algo_fixture<Param> {
         x_host_ = ndarray<float_t, 1>::empty(this->get_queue(), { n_ }, sycl::usm::alloc::host);
         b_host_ = ndarray<float_t, 1>::empty(this->get_queue(), { n_ }, sycl::usm::alloc::host);
 
-        primitives::daal_rng<float_t> rn_gen;
+        primitives::rng<float_t> rn_gen;
         primitives::daal_engine eng(4014 + n_);
-        rn_gen.uniform(n_, x_host_.get_mutable_data(), eng, -1.0, 1.0);
+        rn_gen.uniform_cpu(n_, x_host_.get_mutable_data(), eng, -1.0, 1.0);
 
         create_stable_matrix(this->get_queue(), A_host_);
 
6 changes: 3 additions & 3 deletions cpp/oneapi/dal/backend/primitives/optimizers/test/fixture.hpp
@@ -133,11 +133,11 @@ void create_stable_matrix(sycl::queue& queue,
     ONEDAL_ASSERT(A.get_dimension(1) == n);
     auto J = ndarray<Float, 2>::empty(queue, { n, n }, sycl::usm::alloc::host);
     auto eigen_values = ndarray<Float, 1>::empty(queue, { n }, sycl::usm::alloc::host);
-    primitives::daal_rng<Float> rn_gen;
+    primitives::rng<Float> rn_gen;
     primitives::daal_engine eng(2007 + n);
 
-    rn_gen.uniform(n * n, J.get_mutable_data(), eng, -1.0, 1.0);
-    rn_gen.uniform(n, eigen_values.get_mutable_data(), eng, bottom_eig, top_eig);
+    rn_gen.uniform_cpu(n * n, J.get_mutable_data(), eng, -1.0, 1.0);
+    rn_gen.uniform_cpu(n, eigen_values.get_mutable_data(), eng, bottom_eig, top_eig);
 
     // orthogonalize matrix J
     gram_schmidt(J);
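create_stable_matrix fills J and an eigenvalue vector with uniform_cpu and then orthogonalizes J with gram_schmidt. A common way to finish such a fixture is to conjugate a diagonal of the sampled eigenvalues by the orthogonalized matrix; the sketch below follows that pattern with plain standard-library code. The final Q^T * diag(lambda) * Q step is an assumption and does not appear in the hunk.

```cpp
// Build an n x n symmetric test matrix whose eigenvalues lie in
// [bottom_eig, top_eig]: orthogonalize a random matrix, then conjugate a
// diagonal of sampled eigenvalues. Row-major std::vector storage; the
// conjugation step is assumed, only the random fill and Gram-Schmidt are
// shown in the fixture above.
#include <cmath>
#include <cstdint>
#include <random>
#include <vector>

std::vector<double> make_stable_matrix(std::int64_t n,
                                       double bottom_eig,
                                       double top_eig,
                                       unsigned seed) {
    std::mt19937 eng(seed);
    std::uniform_real_distribution<double> unit(-1.0, 1.0);
    std::uniform_real_distribution<double> eig(bottom_eig, top_eig);

    std::vector<double> q(n * n);
    for (auto& v : q)
        v = unit(eng);
    std::vector<double> lambda(n);
    for (auto& v : lambda)
        v = eig(eng);

    // Gram-Schmidt on the rows of q: subtract projections, then normalize
    for (std::int64_t i = 0; i < n; ++i) {
        for (std::int64_t k = 0; k < i; ++k) {
            double dot = 0;
            for (std::int64_t j = 0; j < n; ++j)
                dot += q[i * n + j] * q[k * n + j];
            for (std::int64_t j = 0; j < n; ++j)
                q[i * n + j] -= dot * q[k * n + j];
        }
        double norm = 0;
        for (std::int64_t j = 0; j < n; ++j)
            norm += q[i * n + j] * q[i * n + j];
        norm = std::sqrt(norm);
        for (std::int64_t j = 0; j < n; ++j)
            q[i * n + j] /= norm;
    }

    // A = Q^T * diag(lambda) * Q, symmetric with the sampled spectrum
    std::vector<double> a(n * n, 0.0);
    for (std::int64_t i = 0; i < n; ++i)
        for (std::int64_t j = 0; j < n; ++j)
            for (std::int64_t k = 0; k < n; ++k)
                a[i * n + j] += q[k * n + i] * lambda[k] * q[k * n + j];
    return a;
}
```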
@@ -56,10 +56,10 @@ class newton_cg_test : public te::float_algo_fixture<Param> {
             ndarray<std::int32_t, 1>::empty(this->get_queue(), { n_ + 1 }, sycl::usm::alloc::host);
         auto params_host =
             ndarray<float_t, 1>::empty(this->get_queue(), { p_ + 1 }, sycl::usm::alloc::host);
-        primitives::daal_rng<float_t> rn_gen;
+        primitives::rng<float_t> rn_gen;
         primitives::daal_engine eng(2007 + n);
-        rn_gen.uniform(n_ * p_, X_host.get_mutable_data(), eng, -10.0, 10.0);
-        rn_gen.uniform(p_ + 1, params_host.get_mutable_data(), eng, -5.0, 5.0);
+        rn_gen.uniform_cpu(n_ * p_, X_host.get_mutable_data(), eng, -10.0, 10.0);
+        rn_gen.uniform_cpu(p_ + 1, params_host.get_mutable_data(), eng, -5.0, 5.0);
         for (std::int64_t i = 0; i < n_; ++i) {
             float_t val = 0;
             for (std::int64_t j = 0; j < p_; ++j) {
@@ -144,7 +144,7 @@ class newton_cg_test : public te::float_algo_fixture<Param> {
         auto b_host = ndarray<float_t, 1>::empty(this->get_queue(), { n_ }, sycl::usm::alloc::host);
         primitives::rng<float_t> rn_gen;
         primitives::engine eng(4014 + n_);
-        rn_gen.uniform(n_, solution_.get_mutable_data(), eng, -1.0, 1.0);
+        rn_gen.uniform_cpu(n_, solution_.get_mutable_data(), eng, -1.0, 1.0);
 
         create_stable_matrix(this->get_queue(), A_host, float_t(0.1), float_t(5.0));
 
@@ -164,7 +164,7 @@ class newton_cg_test : public te::float_algo_fixture<Param> {
         auto buffer = ndarray<float_t, 1>::empty(this->get_queue(), { n_ }, sycl::usm::alloc::host);
 
         for (std::int32_t test_num = 0; test_num < 5; ++test_num) {
-            rn_gen.uniform(n_, x_host.get_mutable_data(), eng, -1.0, 1.0);
+            rn_gen.uniform_cpu(n_, x_host.get_mutable_data(), eng, -1.0, 1.0);
             auto x_gpu = x_host.to_device(this->get_queue());
             auto compute_event_vec = func_->update_x(x_gpu, true, {});
             wait_or_pass(compute_event_vec).wait_and_throw();
3 changes: 1 addition & 2 deletions cpp/oneapi/dal/backend/primitives/rng/rng.hpp
@@ -146,8 +146,7 @@ class rng {
                      Type* dst,
                      onedal_engine<EngineType>& engine_,
                      const event_vector& deps = {});
-};
-
 #endif
+};
 
 }; // namespace oneapi::dal::backend::primitives
