From 8cd251dc415da6e40a41a71c80e8cc2e40486c27 Mon Sep 17 00:00:00 2001
From: yuval-io <105581454+Yuval-Ariel@users.noreply.github.com>
Date: Sun, 28 Jan 2024 14:03:30 +0200
Subject: [PATCH 01/10] zlib: Update ci and Makefile using a different link (#820)

The link in zlib.net is broken every time they release a new version,
which breaks our download links.
Replace those links with a stable link in https://github.com/madler
---
 .github/workflows/build_windows.yml |  5 +----
 .github/workflows/ci_pipeline.yml   |  5 +----
 HISTORY.md                          |  1 +
 Makefile                            | 16 +++++++++++++++-
 4 files changed, 18 insertions(+), 9 deletions(-)

diff --git a/.github/workflows/build_windows.yml b/.github/workflows/build_windows.yml
index 4d649aba30..310e1ba069 100644
--- a/.github/workflows/build_windows.yml
+++ b/.github/workflows/build_windows.yml
@@ -99,10 +99,7 @@ jobs:
       shell: powershell
       run: |
         cd $Env:THIRDPARTY_HOME
-        curl https://zlib.net/zlib13.zip -o zlib13.zip
-        Expand-Archive -Path zlib13.zip -DestinationPath zlib-tmp
-        mv .\zlib-tmp\zlib-1.3\ .
-        rmdir zlib-tmp
+        git clone https://github.com/madler/zlib.git -b v1.3 zlib-1.3
         cd zlib-1.3\contrib\vstudio\vc14
         devenv zlibvc.sln /upgrade
         cp ../../../zlib.h .
diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml
index 886393e3f5..263c0df69b 100644
--- a/.github/workflows/ci_pipeline.yml
+++ b/.github/workflows/ci_pipeline.yml
@@ -201,10 +201,7 @@ jobs:
       shell: powershell
       run: |
         cd $Env:THIRDPARTY_HOME
-        curl https://zlib.net/zlib13.zip -o zlib13.zip
-        Expand-Archive -Path zlib13.zip -DestinationPath zlib-tmp
-        mv .\zlib-tmp\zlib-1.3\ .
-        rmdir zlib-tmp
+        git clone https://github.com/madler/zlib.git -b v1.3 zlib-1.3
         cd zlib-1.3\contrib\vstudio\vc14
         devenv zlibvc.sln /upgrade
         cp ../../../zlib.h .
diff --git a/HISTORY.md b/HISTORY.md
index db2af7c1bc..1947a2a954 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -33,6 +33,7 @@ RocksDB has a value of 10 by default and we've added the option to randomize the
 * Remove leftover references to ROCKSDB_LITE (#755).
 * Options: Set compaction_readahead_size default to 0. The current default of 2Mb is not optimal for most of our use cases. Having a value of 0 means that the FS will use its default size for prefetching (true only with https://github.com/speedb-io/speedb/pull/788).
 * Options: Set level_compaction_dynamic_level_bytes as false by default. This flag is not working properly with Speedb. see https://github.com/speedb-io/speedb/issues/786 for more details.
+* zlib: Update links to zlib 1.3 in CI and Makefile since the link in zlib.net is dead.
 
 ## Hazlenut 2.7.0 (27/10/2023)
 Based on RocksDB 8.1.1
diff --git a/Makefile b/Makefile
index 081c11a75d..58fc949248 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,17 @@
+# Copyright (C) 2023 Speedb Ltd. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
 # Copyright (c) 2011 The LevelDB Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file. See the AUTHORS file for names of contributors.
@@ -2003,7 +2017,7 @@ SHA256_CMD = sha256sum
 
 ZLIB_VER ?= 1.3
 ZLIB_SHA256 ?= ff0ba4c292013dbc27530b3a81e1f9a813cd39de01ca5e0f8bf355702efa593e
-ZLIB_DOWNLOAD_BASE ?= http://zlib.net
+ZLIB_DOWNLOAD_BASE ?= https://github.com/madler/zlib/releases/download/v1.3
 BZIP2_VER ?= 1.0.8
 BZIP2_SHA256 ?= ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269
 BZIP2_DOWNLOAD_BASE ?= http://sourceware.org/pub/bzip2
From 94e7a4fbe103ed081b1a4783bea9e0d65db8c4d3 Mon Sep 17 00:00:00 2001
From: udi-speedb <106253580+udi-speedb@users.noreply.github.com>
Date: Sun, 28 Jan 2024 17:32:38 +0200
Subject: [PATCH 02/10] 812 stress test error initiateflushesthread assertion fail num running flushes 0 (#817)

Proactive Flushes: Have the initiator return a correct answer when
requested to initiate a flush
---
 HISTORY.md                             |  1 +
 db/db_impl/db_impl.h                   | 12 ++--
 db/db_impl/db_impl_compaction_flush.cc | 88 ++++++++++++++++++--------
 memtable/write_buffer_manager.cc       |  6 +-
 memtable/write_buffer_manager_test.cc  |  3 +-
 5 files changed, 75 insertions(+), 35 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index 1947a2a954..825f22e84e 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -28,6 +28,7 @@ db_stress has been updated as well to take into account that some memtables do n
 * stress test: Fix TestIterateAgainstExpected not supporting 0 iterations. TestIterateAgainstExpected was not designed to support value of 0 in FLAGS_num_iterations.
 RocksDB has a value of 10 by default and we've added the option to randomize the values from 0 to 100 in https://github.com/speedb-io/speedb/commit/434692a63318036a3995a53001337f18bf467903
 * Add more checks for using db_stress with --enable_speedb_features=true
+* Proactive Flushes: Have the initiator return a correct answer when it was requested to initiate a flush (#812).
 
 ### Miscellaneous
 * Remove leftover references to ROCKSDB_LITE (#755).
diff --git a/db/db_impl/db_impl.h b/db/db_impl/db_impl.h
index b041844c60..36be4d2906 100644
--- a/db/db_impl/db_impl.h
+++ b/db/db_impl/db_impl.h
@@ -462,9 +462,9 @@ class DBImpl : public DB {
 
   // flush initiated by the write buffer manager to free some space
   bool InitiateMemoryManagerFlushRequest(size_t min_size_to_flush);
-  bool InitiateMemoryManagerFlushRequestAtomicFlush(
+  size_t InitiateMemoryManagerFlushRequestAtomicFlush(
       size_t min_size_to_flush, const FlushOptions& flush_options);
-  bool InitiateMemoryManagerFlushRequestNonAtomicFlush(
+  size_t InitiateMemoryManagerFlushRequestNonAtomicFlush(
       size_t min_size_to_flush, const FlushOptions& flush_options);
 
   virtual SequenceNumber GetLatestSequenceNumber() const override;
@@ -1995,7 +1995,8 @@ class DBImpl : public DB {
   // Force current memtable contents to be flushed.
   Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options,
                        FlushReason flush_reason,
-                       bool entered_write_thread = false);
+                       bool entered_write_thread = false,
+                       size_t* num_flushes_initiated = nullptr);
 
   // Atomic-flush memtables from quanlified CFs among `provided_candidate_cfds`
   // (if non-empty) or amomg all column families and atomically record the
@@ -2003,7 +2004,8 @@ class DBImpl : public DB {
   Status AtomicFlushMemTables(
       const FlushOptions& options, FlushReason flush_reason,
       const autovector<ColumnFamilyData*>& provided_candidate_cfds = {},
-      bool entered_write_thread = false);
+      bool entered_write_thread = false,
+      size_t* num_flushes_initiated = nullptr);
 
   // Wait until flushing this column family won't stall writes
   Status WaitUntilFlushWouldNotStallWrites(ColumnFamilyData* cfd,
@@ -2156,7 +2158,7 @@ class DBImpl : public DB {
   void GenerateFlushRequest(const autovector<ColumnFamilyData*>& cfds,
                             FlushReason flush_reason, FlushRequest* req);
 
-  void SchedulePendingFlush(const FlushRequest& req);
+  bool SchedulePendingFlush(const FlushRequest& req);
 
   void SchedulePendingCompaction(ColumnFamilyData* cfd);
   void SchedulePendingPurge(std::string fname, std::string dir_to_sync,
diff --git a/db/db_impl/db_impl_compaction_flush.cc b/db/db_impl/db_impl_compaction_flush.cc
index 8baf079565..514b4ab5ab 100644
--- a/db/db_impl/db_impl_compaction_flush.cc
+++ b/db/db_impl/db_impl_compaction_flush.cc
@@ -2333,7 +2333,12 @@ void DBImpl::GenerateFlushRequest(const autovector<ColumnFamilyData*>& cfds,
 Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
                              const FlushOptions& flush_options,
                              FlushReason flush_reason,
-                             bool entered_write_thread) {
+                             bool entered_write_thread,
+                             size_t* num_flushes_initiated) {
+  if (num_flushes_initiated != nullptr) {
+    *num_flushes_initiated = 0U;
+  }
+
   // This method should not be called if atomic_flush is true.
   assert(!immutable_db_options_.atomic_flush);
   if (!flush_options.wait && write_controller_->IsStopped()) {
@@ -2447,7 +2452,10 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
       }
     }
     for (const auto& req : flush_reqs) {
-      SchedulePendingFlush(req);
+      bool pushed_req = SchedulePendingFlush(req);
+      if (pushed_req && (num_flushes_initiated != nullptr)) {
+        ++(*num_flushes_initiated);
+      }
     }
     MaybeScheduleFlushOrCompaction();
   }
@@ -2486,8 +2494,13 @@ Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
 Status DBImpl::AtomicFlushMemTables(
     const FlushOptions& flush_options, FlushReason flush_reason,
     const autovector<ColumnFamilyData*>& provided_candidate_cfds,
-    bool entered_write_thread) {
+    bool entered_write_thread, size_t* num_flushes_initiated) {
   assert(immutable_db_options_.atomic_flush);
+
+  if (num_flushes_initiated != nullptr) {
+    *num_flushes_initiated = 0U;
+  }
+
   if (!flush_options.wait && write_controller_->IsStopped()) {
     std::ostringstream oss;
     oss << "Writes have been stopped, thus unable to perform manual flush. "
@@ -2598,7 +2611,10 @@ Status DBImpl::AtomicFlushMemTables(
       }
     }
     GenerateFlushRequest(cfds, flush_reason, &flush_req);
-    SchedulePendingFlush(flush_req);
+    bool pushed_req = SchedulePendingFlush(flush_req);
+    if (pushed_req && (num_flushes_initiated != nullptr)) {
+      ++(*num_flushes_initiated);
+    }
     MaybeScheduleFlushOrCompaction();
   }
 
@@ -3014,14 +3030,17 @@ ColumnFamilyData* DBImpl::PickCompactionFromQueue(
   return cfd;
 }
 
-void DBImpl::SchedulePendingFlush(const FlushRequest& flush_req) {
+bool DBImpl::SchedulePendingFlush(const FlushRequest& flush_req) {
   mutex_.AssertHeld();
   if (reject_new_background_jobs_) {
-    return;
+    return false;
   }
   if (flush_req.cfd_to_max_mem_id_to_persist.empty()) {
-    return;
+    return false;
   }
+
+  bool pushed_req = false;
+
   if (!immutable_db_options_.atomic_flush) {
     // For the non-atomic flush case, we never schedule multiple column
     // families in the same flush request.
@@ -3035,6 +3054,7 @@ void DBImpl::SchedulePendingFlush(const FlushRequest& flush_req) {
       cfd->set_queued_for_flush(true);
       ++unscheduled_flushes_;
       flush_queue_.push_back(flush_req);
+      pushed_req = true;
     }
   } else {
     for (auto& iter : flush_req.cfd_to_max_mem_id_to_persist) {
@@ -3043,7 +3063,10 @@ void DBImpl::SchedulePendingFlush(const FlushRequest& flush_req) {
     }
     ++unscheduled_flushes_;
     flush_queue_.push_back(flush_req);
+    pushed_req = true;
   }
+
+  return pushed_req;
 }
 
 void DBImpl::SchedulePendingCompaction(ColumnFamilyData* cfd) {
@@ -3273,11 +3296,6 @@ Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context,
                      bg_job_limits.max_compactions, bg_flush_scheduled_,
                      bg_compaction_scheduled_);
   }
-  *reason = bg_flush_args[0].flush_reason_;
-  if (write_buffer_manager_) {
-    write_buffer_manager_->FlushStarted(
-        *reason == FlushReason::kWriteBufferManagerInitiated);
-  }
 
   status = FlushMemTablesToOutputFiles(bg_flush_args, made_progress,
                                        job_context, log_buffer, thread_pri);
@@ -3325,6 +3343,12 @@ void DBImpl::BackgroundCallFlush(Env::Priority thread_pri) {
     Status s = BackgroundFlush(&made_progress, &job_context, &log_buffer,
                                &reason, &flush_rescheduled_to_retain_udt,
                                thread_pri);
+
+    if (write_buffer_manager_) {
+      write_buffer_manager_->FlushStarted(
+          reason == FlushReason::kWriteBufferManagerInitiated);
+    }
+
     if (s.IsTryAgain() && flush_rescheduled_to_retain_udt) {
       bg_cv_.SignalAll();  // In case a waiter can proceed despite the error
       mutex_.Unlock();
@@ -4351,16 +4375,20 @@ bool DBImpl::InitiateMemoryManagerFlushRequest(size_t min_size_to_flush) {
   flush_options.allow_write_stall = true;
   flush_options.wait = false;
 
+  size_t num_flushes_initiated = 0U;
   if (immutable_db_options_.atomic_flush) {
-    return InitiateMemoryManagerFlushRequestAtomicFlush(min_size_to_flush,
-                                                        flush_options);
+    num_flushes_initiated = InitiateMemoryManagerFlushRequestAtomicFlush(
+        min_size_to_flush, flush_options);
   } else {
-    return InitiateMemoryManagerFlushRequestNonAtomicFlush(min_size_to_flush,
-                                                           flush_options);
+    num_flushes_initiated = InitiateMemoryManagerFlushRequestNonAtomicFlush(
+        min_size_to_flush, flush_options);
  }
+
+  // TODO - Have Proactive Flushes handle num_flushes_initiated > 1
+  return (num_flushes_initiated > 0U);
 }
 
-bool DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush(
+size_t DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush(
     size_t min_size_to_flush, const FlushOptions& flush_options) {
   assert(immutable_db_options_.atomic_flush);
 
@@ -4370,7 +4398,7 @@ bool DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush(
   SelectColumnFamiliesForAtomicFlush(&cfds);
 
   if (cfds.empty()) {
-    return false;
+    return 0U;
   }
 
   // min_size_to_flush may be 0.
@@ -4391,7 +4419,7 @@ bool DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush(
       }
     }
     if (total_size_to_flush < min_size_to_flush) {
-      return false;
+      return 0U;
     }
   }
 }
@@ -4404,17 +4432,21 @@ bool DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush(
   TEST_SYNC_POINT(
       "DBImpl::InitiateMemoryManagerFlushRequestAtomicFlush::BeforeFlush");
 
+  size_t num_flushes_initiated = 0U;
   auto s = AtomicFlushMemTables(
-      flush_options, FlushReason::kWriteBufferManagerInitiated, cfds);
+      flush_options, FlushReason::kWriteBufferManagerInitiated, cfds,
+      false /* entered_write_thread */, &num_flushes_initiated);
 
   ROCKS_LOG_INFO(
       immutable_db_options_.info_log,
       "write buffer manager initiated Atomic flush finished, status: %s",
       s.ToString().c_str());
-  return s.ok();
+
+  assert(s.ok() || (num_flushes_initiated == 0));
+  return num_flushes_initiated;
 }
 
-bool DBImpl::InitiateMemoryManagerFlushRequestNonAtomicFlush(
+size_t DBImpl::InitiateMemoryManagerFlushRequestNonAtomicFlush(
     size_t min_size_to_flush, const FlushOptions& flush_options) {
   assert(immutable_db_options_.atomic_flush == false);
 
@@ -4456,7 +4488,7 @@ bool DBImpl::InitiateMemoryManagerFlushRequestNonAtomicFlush(
   }
 
   if (cfd_to_flush == nullptr) {
-    return false;
+    return 0U;
   }
 
   orig_cfd_to_flush = cfd_to_flush;
@@ -4503,15 +4535,19 @@ bool DBImpl::InitiateMemoryManagerFlushRequestNonAtomicFlush(
   TEST_SYNC_POINT(
       "DBImpl::InitiateMemoryManagerFlushRequestNonAtomicFlush::BeforeFlush");
 
-  auto s = FlushMemTable(cfd_to_flush, flush_options,
-                         FlushReason::kWriteBufferManagerInitiated);
+  size_t num_flushes_initiated = 0U;
+
+  auto s = FlushMemTable(
+      cfd_to_flush, flush_options, FlushReason::kWriteBufferManagerInitiated,
+      false /* entered_write_thread */, &num_flushes_initiated);
 
   ROCKS_LOG_INFO(
       immutable_db_options_.info_log,
       "[%s] write buffer manager initialize flush finished, status: %s\n",
       cfd_to_flush->GetName().c_str(), s.ToString().c_str());
 
-  return s.ok();
+  assert(s.ok() || (num_flushes_initiated == 0));
+  return num_flushes_initiated;
 }
 
 }  // namespace ROCKSDB_NAMESPACE
diff --git a/memtable/write_buffer_manager.cc b/memtable/write_buffer_manager.cc
index 9d8bfe600e..bdfc389798 100644
--- a/memtable/write_buffer_manager.cc
+++ b/memtable/write_buffer_manager.cc
@@ -809,9 +809,9 @@ void WriteBufferManager::FlushEnded(bool /* wbm_initiated */) {
   // the WBM will not be aware of the number of running flushes at the time
   // it is enabled. The counter will become valid once all of the flushes
   // that were running when it was enabled will have completed.
-  if (num_running_flushes_ > 0U) {
-    --num_running_flushes_;
-  }
+  assert(num_running_flushes_ > 0U);
+  --num_running_flushes_;
+
   size_t curr_memory_used = memory_usage();
   RecalcFlushInitiationSize();
   ReevaluateNeedForMoreFlushesLockHeld(curr_memory_used);
diff --git a/memtable/write_buffer_manager_test.cc b/memtable/write_buffer_manager_test.cc
index 035490a5d3..893893b6dc 100644
--- a/memtable/write_buffer_manager_test.cc
+++ b/memtable/write_buffer_manager_test.cc
@@ -905,7 +905,8 @@ TEST_P(WriteBufferManagerFlushInitiationTest, DISABLED_FlushInitiationSteps) {
   DeregisterInitiator(initiator_id);
 }
 
-TEST_P(WriteBufferManagerFlushInitiationTest, RegisteringLate) {
+// TODO - The test is flaky. Investigate why and either fix it or remove it
+TEST_P(WriteBufferManagerFlushInitiationTest, DISABLED_RegisteringLate) {
   // Reach the 1st step, but no registered initiators
   wbm_->ReserveMem(flush_step_size_);
   IncNumFlushesToInitiate();
From 536f77974f867a78971c1a7bc0c5240730630375 Mon Sep 17 00:00:00 2001
From: yuval-io <105581454+Yuval-Ariel@users.noreply.github.com>
Date: Mon, 29 Jan 2024 13:25:57 +0200
Subject: [PATCH 03/10] stress test: Disable IsDone which conflicts with the trace file (#816)

Adding a trace file by default in PR https://github.com/speedb-io/speedb/pull/797
has revealed some incompatibilities between the trace file and several
configurations (more details in https://github.com/speedb-io/speedb/issues/813).
Keep the trace file and remove the IsDone assertion.
---
 HISTORY.md                       |  1 +
 db_stress_tool/expected_state.cc | 10 +++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/HISTORY.md b/HISTORY.md
index 825f22e84e..f8ab24ab3f 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -29,6 +29,7 @@ db_stress has been updated as well to take into account that some memtables do n
 RocksDB has a value of 10 by default and we've added the option to randomize the values from 0 to 100 in https://github.com/speedb-io/speedb/commit/434692a63318036a3995a53001337f18bf467903
 * Add more checks for using db_stress with --enable_speedb_features=true
 * Proactive Flushes: Have the initiator return a correct answer when it was requested to initiate a flush (#812).
+* stress test: Adding a trace file by default in PR https://github.com/speedb-io/speedb/pull/797 has revealed some incompatibilities between the trace file and several configurations (more details in https://github.com/speedb-io/speedb/issues/813). Keep the trace file and remove the IsDone assertion.
 
 ### Miscellaneous
 * Remove leftover references to ROCKSDB_LITE (#755).
diff --git a/db_stress_tool/expected_state.cc b/db_stress_tool/expected_state.cc
index 443e021454..9fa8d83bbb 100644
--- a/db_stress_tool/expected_state.cc
+++ b/db_stress_tool/expected_state.cc
@@ -396,7 +396,15 @@ class ExpectedStateTraceRecordHandler : public TraceRecord::Handler,
         state_(state),
         buffered_writes_(nullptr) {}
 
-  ~ExpectedStateTraceRecordHandler() { assert(IsDone()); }
+  ~ExpectedStateTraceRecordHandler() {
+    fprintf(
+        stderr,
+        "WARNING: ~ExpectedStateTraceRecordHandler - num_write_ops_: %" PRIu64
+        " max_write_ops_: %" PRIu64 "\n",
+        num_write_ops_, max_write_ops_);
+    // assert(IsDone())
+    ;
+  }
 
   // True if we have already reached the limit on write operations to apply.
   bool IsDone() { return num_write_ops_ == max_write_ops_; }
From ed41ab505b07363a838932a79c415a49eb29468e Mon Sep 17 00:00:00 2001
From: ofriedma <48631098+ofriedma@users.noreply.github.com>
Date: Mon, 29 Jan 2024 14:51:55 +0200
Subject: [PATCH 04/10] stress test: Fix flag validation in Enable Speedb Features (#814)

Fixes a bug where ValidateEnableSpeedbFlags() checks for non-existent
parameters in db_stress
---
 HISTORY.md                            |  1 +
 db_stress_tool/db_stress_test_base.cc | 28 ++++++++++++++-------------
 2 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index f8ab24ab3f..e08c640192 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -16,6 +16,7 @@ Based on RocksDB 8.6.7
 * LOG Enhancement: Have a separate LOG entry per CF Stats. This ensures that no CF stats data is lost in case the size of the combined CF stats text exceeds the LOG's threshold (#534).
 
 ### Bug Fixes
+* Fix a bug in db_stress where non-existent parameters were checked with enable_speedb_features.
 * Added IsRefreshIterSupported() to memtable_rep, to publish if the memtable support Refresh() of the iterator. Refresh() will return status NotSupported for memtables that do not support Refresh(). IsAllowRefresh() has been added.
diff --git a/db_stress_tool/db_stress_test_base.cc b/db_stress_tool/db_stress_test_base.cc
index abcdf86ca1..fd3be38ad0 100644
--- a/db_stress_tool/db_stress_test_base.cc
+++ b/db_stress_tool/db_stress_test_base.cc
@@ -147,19 +147,21 @@ bool is_default(const char* flag_name) {
 }
 
 void ValidateEnableSpeedbFlags() {
-  std::vector<std::string> confilct_flags = {
-      "num_high_pri_threads", "num_bottom_pri_threads",
-      "num_bottom_pri_threads", "num_low_pri_threads",
-      "max_background_compactions", "max_background_flushes", "cache_size",
-      "cache_type",
-      // Assume simcache_size default is disabled simcache
-      "simcache_size", "memtablerep", "pinning_policy",
-      "scoped_pinning_capacity", "use_ribbon_filter", "bloom_bits",
-      "allow_wbm_stalls", "db_write_buffer_size", "initiate_wbm_flushes",
-      "bytes_per_sync", "use_dynamic_delay", "memtable_bloom_size_ratio",
-      "whole_key_filtering", "optimize_filters_for_hits",
-      "max_num_parallel_flushes", "start_delay_percent",
-      "max_num_parallel_flushes", "use_blob_cache"};
+  std::vector<std::string> confilct_flags = {"max_background_compactions",
+                                             "max_background_flushes",
+                                             "cache_size",
+                                             "cache_type",
+                                             "memtablerep",
+                                             "pinning_policy",
+                                             "bloom_bits",
+                                             "allow_wbm_stalls",
+                                             "db_write_buffer_size",
+                                             "initiate_wbm_flushes",
+                                             "bytes_per_sync",
+                                             "use_dynamic_delay",
+                                             "start_delay_percent",
+                                             "max_num_parallel_flushes",
+                                             "use_blob_cache"};
 
   if (FLAGS_enable_speedb_features && !FLAGS_crash_test) {
     if (is_default("max_background_jobs") || is_default("total_ram_size")) {
From c328f0ebc33db0166cd2a2844ff44c8034e30e5b Mon Sep 17 00:00:00 2001
From: GitHub Runner Bot <>
Date: Wed, 31 Jan 2024 11:30:08 +0000
Subject: [PATCH 05/10] release: publish version 2.8.0

---
 speedb/version.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/speedb/version.h b/speedb/version.h
index f50314d836..2b3b29709d 100644
--- a/speedb/version.h
+++ b/speedb/version.h
@@ -16,7 +16,7 @@
 #pragma once
 
 #define SPEEDB_MAJOR 2
-#define SPEEDB_MINOR 7
+#define SPEEDB_MINOR 8
 #define SPEEDB_PATCH 0
 
 namespace ROCKSDB_NAMESPACE {
From b9f951f9b4054ca1701e30dd60519fafa5aa8f0e Mon Sep 17 00:00:00 2001
From: yuval-io <105581454+Yuval-Ariel@users.noreply.github.com>
Date: Mon, 12 Feb 2024 09:47:01 +0200
Subject: [PATCH 06/10] Update HISTORY.md for 2.8.0 (#830)

---
 HISTORY.md | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/HISTORY.md b/HISTORY.md
index e08c640192..0f6a749d95 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -1,6 +1,16 @@
 # Speedb Change Log
 
 ## Unreleased
+
+### New Features
+
+### Enhancements
+
+### Bug Fixes
+
+### Miscellaneous
+
+## Incaberry 2.8.0 (31/1/2024)
 Based on RocksDB 8.6.7
 
 ### New Features
@@ -38,7 +48,7 @@ RocksDB has a value of 10 by default and we've added the option to randomize the
 * Options: Set compaction_readahead_size default to 0. The current default of 2Mb is not optimal for most of our use cases. Having a value of 0 means that the FS will use its default size for prefetching (true only with https://github.com/speedb-io/speedb/pull/788).
 * Options: Set level_compaction_dynamic_level_bytes as false by default. This flag is not working properly with Speedb. see https://github.com/speedb-io/speedb/issues/786 for more details.
 * zlib: Update links to zlib 1.3 in CI and Makefile since the link in zlib.net is dead.
 
-## Hazlenut 2.7.0 (27/10/2023)
+## Hazelnut 2.7.0 (27/10/2023)
 Based on RocksDB 8.1.1
 
 ### New Features
From b2c5bd52ddca164b454e45c2f810c62a5734d578 Mon Sep 17 00:00:00 2001
From: yuval-io <105581454+Yuval-Ariel@users.noreply.github.com>
Date: Tue, 13 Feb 2024 12:33:37 +0200
Subject: [PATCH 07/10] WriteController: Remove redundant setting delay reports with single db (#831)

Also fixed cf delay msgs to be more informative
---
 HISTORY.md             |  2 ++
 db/column_family.cc    | 10 +++++-----
 db/write_controller.cc |  4 ++++
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index 0f6a749d95..a80bd9ca6c 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -9,6 +9,8 @@
 ### Bug Fixes
 
 ### Miscellaneous
+* WriteController logging: Remove redundant reports when WC is not shared between dbs
+
 ## Incaberry 2.8.0 (31/1/2024)
 Based on RocksDB 8.6.7
 
diff --git a/db/column_family.cc b/db/column_family.cc
index 7be55f5569..1d8e7499e7 100644
--- a/db/column_family.cc
+++ b/db/column_family.cc
@@ -1123,8 +1123,8 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
       ROCKS_LOG_WARN(
           ioptions_.logger,
           "[%s] Stalling writes because we have %d immutable memtables "
-          "(waiting for flush), max_write_buffer_number is set to %d "
-          "rate %" PRIu64,
+          "(waiting for flush), max_write_buffer_number is set to %d. "
+          "delayed write rate: %" PRIu64,
           name_.c_str(), imm()->NumNotFlushed(),
           mutable_cf_options.max_write_buffer_number,
           write_controller->delayed_write_rate());
@@ -1146,8 +1146,8 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
             1);
       }
       ROCKS_LOG_WARN(ioptions_.logger,
-                     "[%s] Stalling writes because we have %d level-0 files "
-                     "rate %" PRIu64,
+                     "[%s] Stalling writes because we have %d level-0 files. "
+                     "delayed write rate: %" PRIu64,
                      name_.c_str(), vstorage->l0_delay_trigger_count(),
                      write_controller->delayed_write_rate());
     } else if (write_stall_condition == WriteStallCondition::kDelayed &&
@@ -1175,7 +1175,7 @@ WriteStallCondition ColumnFamilyData::RecalculateWriteStallConditions(
       ROCKS_LOG_WARN(
          ioptions_.logger,
           "[%s] Stalling writes because of estimated pending compaction "
-          "bytes %" PRIu64 " rate %" PRIu64,
+          "bytes %" PRIu64 ". delayed write rate: %" PRIu64,
           name_.c_str(), vstorage->estimated_compaction_needed_bytes(),
           write_controller->delayed_write_rate());
     } else {
diff --git a/db/write_controller.cc b/db/write_controller.cc
index 64e4acd5a3..ce5487d6f4 100644
--- a/db/write_controller.cc
+++ b/db/write_controller.cc
@@ -149,6 +149,10 @@ void WriteController::HandleNewDelayReq(void* client_id,
   {
     std::lock_guard logger_lock(loggers_map_mu_);
+    // The below WARN msg is intended only when the WC is shared among loggers.
+    if (loggers_to_client_ids_map_.size() == 1) {
+      return;
+    }
     for (auto& logger_and_clients : loggers_to_client_ids_map_) {
       ROCKS_LOG_WARN(logger_and_clients.first.get(),
                      "WC setting delay of %" PRIu64
From 2bb49eb23b5ff6c81bfc221dfe14b10e0c065cc2 Mon Sep 17 00:00:00 2001
From: maxb-io <105273783+maxb-io@users.noreply.github.com>
Date: Thu, 15 Feb 2024 12:21:02 +0200
Subject: [PATCH 08/10] added push and pull_request_target triggers to the check lic and hist workflow (#835)

---
 .github/workflows/check_license_and_history.yml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/check_license_and_history.yml b/.github/workflows/check_license_and_history.yml
index 6f67c82415..5fef42e087 100644
--- a/.github/workflows/check_license_and_history.yml
+++ b/.github/workflows/check_license_and_history.yml
@@ -3,8 +3,9 @@ name: Check License and History
 on: # this workflow is planned to be called by the ci_pipeline and it will compare the PR files with the main
   workflow_call:
   workflow_dispatch:
-  #pull_request_review:
-  #  types: [submitted]
+  push:
+  pull_request_target:
+
 
 jobs:
   changedfiles:
From 4d3c98884c7bb2def2cd37414cdd3a97ddf9a04c Mon Sep 17 00:00:00 2001
From: udi-speedb <106253580+udi-speedb@users.noreply.github.com>
Date: Tue, 20 Feb 2024 15:05:51 +0200
Subject: [PATCH 09/10] LOG: Align pinning policy options with block_cache/metadata_cache options (#805)

---
 HISTORY.md                                            | 2 ++
 plugin/speedb/pinning_policy/scoped_pinning_policy.cc | 7 +++----
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/HISTORY.md b/HISTORY.md
index a80bd9ca6c..c70734a61b 100644
--- a/HISTORY.md
+++ b/HISTORY.md
@@ -7,6 +7,7 @@
 ### Enhancements
 
 ### Bug Fixes
+* LOG Consistency: Display the pinning policy options same as block cache options / metadata cache options (#804).
 
 ### Miscellaneous
 * WriteController logging: Remove redundant reports when WC is not shared between dbs
+
 ## Incaberry 2.8.0 (31/1/2024)
 Based on RocksDB 8.6.7
 
@@ -44,6 +45,7 @@ RocksDB has a value of 10 by default and we've added the option to randomize the
 * Proactive Flushes: Have the initiator return a correct answer when it was requested to initiate a flush (#812).
 * stress test: Adding a trace file by default in PR https://github.com/speedb-io/speedb/pull/797 has revealed some incompatibilities between the trace file and several configurations (more details in https://github.com/speedb-io/speedb/issues/813). Keep the trace file and remove the IsDone assertion.
+
 ### Miscellaneous
 * Remove leftover references to ROCKSDB_LITE (#755).
 * Options: Set compaction_readahead_size default to 0. The current default of 2Mb is not optimal for most of our use cases. Having a value of 0 means that the FS will use its default size for prefetching (true only with https://github.com/speedb-io/speedb/pull/788).
diff --git a/plugin/speedb/pinning_policy/scoped_pinning_policy.cc b/plugin/speedb/pinning_policy/scoped_pinning_policy.cc
index 61c29c6257..943c4f5f8f 100644
--- a/plugin/speedb/pinning_policy/scoped_pinning_policy.cc
+++ b/plugin/speedb/pinning_policy/scoped_pinning_policy.cc
@@ -77,17 +77,16 @@ std::string ScopedPinningPolicy::GetPrintableOptions() const {
   const int kBufferSize = 200;
   char buffer[kBufferSize];
 
-  snprintf(buffer, kBufferSize,
-           " pinning_policy.capacity: %" ROCKSDB_PRIszt "\n",
+  snprintf(buffer, kBufferSize, " capacity: %" ROCKSDB_PRIszt "\n",
            options_.capacity);
   ret.append(buffer);
 
   snprintf(buffer, kBufferSize,
-           " pinning_policy.last_level_with_data_percent: %" PRIu32 "\n",
+           " last_level_with_data_percent: %" PRIu32 "\n",
            options_.last_level_with_data_percent);
   ret.append(buffer);
 
-  snprintf(buffer, kBufferSize, " pinning_policy.mid_percent: %" PRIu32 "\n",
+  snprintf(buffer, kBufferSize, " mid_percent: %" PRIu32 "\n",
            options_.mid_percent);
   ret.append(buffer);
From 95300a1cad5db593595a99993c41df5a45c22d1b Mon Sep 17 00:00:00 2001
From: maxb-io <105273783+maxb-io@users.noreply.github.com>
Date: Wed, 21 Feb 2024 09:21:05 +0200
Subject: [PATCH 10/10] 836 add jtest to ci (#837)

* added a java unit test step to the linux-build job in the ci pipeline and
  renamed the job to linux-build-and-UnitTest
---
 .github/workflows/ci_pipeline.yml | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/ci_pipeline.yml b/.github/workflows/ci_pipeline.yml
index 263c0df69b..6dba3bc586 100644
--- a/.github/workflows/ci_pipeline.yml
+++ b/.github/workflows/ci_pipeline.yml
@@ -275,7 +275,7 @@ jobs:
           ROCKSDB_DISABLE_JEMALLOC=1 PORTABLE=1 DEBUG_LEVEL=0 make -j 4 rocksdbjavastatic
 
-  Linux-build:
+  Linux-build-and-UnitTest:
     if: ${{ (always() && !failure() && !cancelled()) && (github.event.review.state == 'approved' || github.event_name == 'workflow_dispatch' || startsWith(github.ref, 'refs/heads/release')) }}
     needs: [Build]
     runs-on: ubuntu-latest
@@ -317,6 +317,11 @@ jobs:
          make clean
          SPDB_RELEASE_BUILD=1 LIB_MODE=static DEBUG_LEVEL=0 PORTABLE=1 JAVA_HOME=/usr/lib/jvm/java-openjdk make -j$(nproc) rocksdbjavastatic
 
+      - name: Java Unit test
+        run: |
+          make clean
+          JAVA_HOME=/usr/lib/jvm/java-openjdk make -j$(nproc) jtest
+
       - name: Build db_bench
         run: |
           yum install -y gflags-devel
@@ -364,7 +369,7 @@ jobs:
   CI-all:
     if: ${{ github.event.review.state == 'approved' || github.event_name == 'workflow_dispatch' || startsWith(github.ref, 'refs/heads/release') }}
-    needs: [Check-Licence-And-History, Build, QA-Tests, Fuzz, Linux-Arm-build, Linux-build, Macos-build, Windows-build-test]
+    needs: [Check-Licence-And-History, Build, QA-Tests, Fuzz, Linux-Arm-build, Linux-build-and-UnitTest, Macos-build, Windows-build-test]
     runs-on: ubuntu-latest
     steps:
       - name: Summary