From 866e68ed2712cd0f7b1fee99e94d3c48b720c100 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Uzarski?= Date: Wed, 12 Feb 2025 13:15:34 +0100 Subject: [PATCH 1/4] ccm: add support for updateconf --- scylla/tests/ccm_integration/ccm/cluster.rs | 99 +++++++++++++++++++++ scylla/tests/ccm_integration/ccm/mod.rs | 55 ++++++++++++ 2 files changed, 154 insertions(+) diff --git a/scylla/tests/ccm_integration/ccm/cluster.rs b/scylla/tests/ccm_integration/ccm/cluster.rs index f1d7ba2709..566f20b015 100644 --- a/scylla/tests/ccm_integration/ccm/cluster.rs +++ b/scylla/tests/ccm_integration/ccm/cluster.rs @@ -263,6 +263,56 @@ impl Node { env } + /// Executes `ccm updateconf` and applies it to this node. + /// It accepts key-value pairs used to update the configuration. + /// + /// ### Example + /// ``` + /// # use crate::ccm::cluster::Node; + /// # async fn check_only_compiles(node: &Node) -> Result<(), Box<dyn std::error::Error>> { + /// let args = [ + /// ("client_encryption_options.enabled", "true"), + /// ("client_encryption_options.certificate", "db.cert"), + /// ("client_encryption_options.keyfile", "db.key"), + /// ]; + /// + /// node.updateconf(args).await?; + /// # Ok(()) + /// # } + /// ``` + /// + /// The code above is equivalent to the following scylla.yaml: + /// ```yaml + /// client_encryption_options: + /// enabled: true + /// certificate: db.cert + /// keyfile: db.key + /// ``` + pub(crate) async fn updateconf<K, V>( + &self, + key_values: impl IntoIterator<Item = (K, V)>, + ) -> Result<(), Error> + where + K: AsRef<str>, + V: AsRef<str>, + { + let config_dir = &self.config_dir; + let mut args: Vec<String> = vec![ + self.opts.name(), + "updateconf".to_string(), + "--config-dir".to_string(), + config_dir.to_string_lossy().into_owned(), + ]; + for (k, v) in key_values.into_iter() { + args.push(format!("{}:{}", k.as_ref(), v.as_ref())); + } + + self.logged_cmd + .run_command("ccm", &args, RunOptions::new()) + .await?; + Ok(()) + } + /// This method starts the node. User can provide optional [`NodeStartOptions`] to control the behavior of the node start. /// If `None` is provided, the default options are used (see the implementation of Default for [`NodeStartOptions`]). pub(crate) async fn start(&mut self, opts: Option<NodeStartOptions>) -> Result<(), Error> { @@ -580,6 +630,55 @@ impl Cluster { Ok(()) } + /// Executes `ccm updateconf` and applies it to all nodes in the cluster. + /// It accepts key-value pairs used to update the configuration. + /// + /// ### Example + /// ``` + /// # use crate::ccm::cluster::Cluster; + /// # async fn check_only_compiles(cluster: &Cluster) -> Result<(), Box<dyn std::error::Error>> { + /// let args = [ + /// ("client_encryption_options.enabled", "true"), + /// ("client_encryption_options.certificate", "db.cert"), + /// ("client_encryption_options.keyfile", "db.key"), + /// ]; + /// + /// cluster.updateconf(args).await?;
+ /// # Ok(()) + /// # } + /// ``` + /// + /// The code above is equivalent to the following scylla.yaml: + /// ```yaml + /// client_encryption_options: + /// enabled: true + /// certificate: db.cert + /// keyfile: db.key + /// ``` + pub(crate) async fn updateconf<K, V>( + &self, + key_values: impl IntoIterator<Item = (K, V)>, + ) -> Result<(), Error> + where + K: AsRef<str>, + V: AsRef<str>, + { + let config_dir = self.config_dir(); + let mut args: Vec<String> = vec![ + "updateconf".to_string(), + "--config-dir".to_string(), + config_dir.to_string_lossy().into_owned(), + ]; + for (k, v) in key_values.into_iter() { + args.push(format!("{}:{}", k.as_ref(), v.as_ref())); + } + + self.logged_cmd + .run_command("ccm", &args, RunOptions::new()) + .await?; + Ok(()) + } + fn get_ccm_env(&self) -> HashMap<String, String> { let mut env: HashMap<String, String> = HashMap::new(); env.insert( diff --git a/scylla/tests/ccm_integration/ccm/mod.rs b/scylla/tests/ccm_integration/ccm/mod.rs index a495ba6f30..2d1e42da2f 100644 --- a/scylla/tests/ccm_integration/ccm/mod.rs +++ b/scylla/tests/ccm_integration/ccm/mod.rs @@ -81,3 +81,58 @@ where test_body(Arc::clone(&wrapper.0)).await; std::mem::drop(wrapper); } + +/// Run a CCM test with custom configuration logic applied before the cluster starts. +/// +/// ### Example +/// ``` +/// # use crate::ccm::cluster::Cluster; +/// # use crate::ccm::run_ccm_test_with_configuration; +/// # use std::sync::Arc; use tokio::sync::Mutex; +/// async fn configure_cluster(cluster: Cluster) -> Cluster { +/// // Do some configuration here +/// cluster.updateconf([("foo", "bar")]).await.expect("failed to update conf"); +/// cluster +/// } +/// +/// async fn test(cluster: Arc<Mutex<Cluster>>) { +/// let cluster = cluster.lock().await; +/// let session = cluster.make_session_builder().await.build().await.unwrap(); +/// +/// println!("Successfully connected to the cluster!"); +/// } +/// +/// run_ccm_test_with_configuration(ClusterOptions::default, configure_cluster, test).await; +/// ``` +pub(crate) async fn run_ccm_test_with_configuration<C, Conf, ConfFut, T, TFut>( + make_cluster_options: C, + configure: Conf, + test_body: T, +) where + C: FnOnce() -> ClusterOptions, + Conf: FnOnce(Cluster) -> ConfFut, + ConfFut: Future<Output = Cluster>, + T: FnOnce(Arc<Mutex<Cluster>>) -> TFut, + TFut: Future<Output = ()>, +{ + let cluster_options = make_cluster_options(); + let mut cluster = Cluster::new(cluster_options) + .await + .expect("Failed to create cluster"); + cluster.init().await.expect("failed to initialize cluster"); + cluster = configure(cluster).await; + cluster.start(None).await.expect("failed to start cluster"); + + struct ClusterWrapper(Arc<Mutex<Cluster>>); + impl Drop for ClusterWrapper { + fn drop(&mut self) { + if std::thread::panicking() && *TEST_KEEP_CLUSTER_ON_FAILURE { + println!("Test failed, keep cluster alive, TEST_KEEP_CLUSTER_ON_FAILURE=true"); + self.0.blocking_lock().set_keep_on_drop(true); + } + } + } + let wrapper = ClusterWrapper(Arc::new(tokio::sync::Mutex::new(cluster))); + test_body(Arc::clone(&wrapper.0)).await; + std::mem::drop(wrapper); +} From 3335c1f5e333734aaad31a3cf2c1e2363a8c47db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Uzarski?= Date: Wed, 12 Feb 2025 13:16:32 +0100 Subject: [PATCH 2/4] ccm: make run_ccm_test use run_ccm_test_with_configuration --- scylla/tests/ccm_integration/ccm/mod.rs | 25 ++++++------------------- 1 file changed, 6 insertions(+), 19 deletions(-) diff --git a/scylla/tests/ccm_integration/ccm/mod.rs b/scylla/tests/ccm_integration/ccm/mod.rs index 2d1e42da2f..95a7195025 100644 --- a/scylla/tests/ccm_integration/ccm/mod.rs +++ b/scylla/tests/ccm_integration/ccm/mod.rs @@ -61,25 +61,12 @@ where T: 
FnOnce(Arc<Mutex<Cluster>>) -> Fut, Fut: Future<Output = ()>, { - let cluster_options = make_cluster_options(); - let mut cluster = Cluster::new(cluster_options) - .await - .expect("Failed to create cluster"); - cluster.init().await.expect("failed to initialize cluster"); - cluster.start(None).await.expect("failed to start cluster"); - - struct ClusterWrapper(Arc<Mutex<Cluster>>); - impl Drop for ClusterWrapper { - fn drop(&mut self) { - if std::thread::panicking() && *TEST_KEEP_CLUSTER_ON_FAILURE { - println!("Test failed, keep cluster alive, TEST_KEEP_CLUSTER_ON_FAILURE=true"); - self.0.blocking_lock().set_keep_on_drop(true); - } - } - } - let wrapper = ClusterWrapper(Arc::new(tokio::sync::Mutex::new(cluster))); - test_body(Arc::clone(&wrapper.0)).await; - std::mem::drop(wrapper); + run_ccm_test_with_configuration( + make_cluster_options, + |cluster| async move { cluster }, + test_body, + ) + .await } /// Run a CCM test with custom configuration logic applied before the cluster starts. From bc94af63d531b972972ee3053eef59cfdaea41ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Uzarski?= Date: Wed, 12 Feb 2025 13:35:46 +0100 Subject: [PATCH 3/4] ccm: add support for enabling TLS --- scylla/tests/ccm_integration/ccm/cluster.rs | 25 ++++++++ scylla/tests/ccm_integration/ccm/mod.rs | 67 ++++++++++++++++++++ scylla/tests/ccm_integration/main.rs | 2 + scylla/tests/ccm_integration/tls.rs | 70 +++++++++++++++++++++ 4 files changed, 164 insertions(+) create mode 100644 scylla/tests/ccm_integration/tls.rs diff --git a/scylla/tests/ccm_integration/ccm/cluster.rs b/scylla/tests/ccm_integration/ccm/cluster.rs index 566f20b015..3f2d688c2a 100644 --- a/scylla/tests/ccm_integration/ccm/cluster.rs +++ b/scylla/tests/ccm_integration/ccm/cluster.rs @@ -2,6 +2,7 @@ use crate::ccm::{IP_ALLOCATOR, ROOT_CCM_DIR}; use super::ip_allocator::NetPrefix; use super::logged_cmd::{LoggedCmd, RunOptions}; +use super::{DB_TLS_CERT_PATH, DB_TLS_KEY_PATH}; use anyhow::{Context, Error}; use scylla::client::session_builder::SessionBuilder; use std::collections::HashMap; @@ -313,6 +314,18 @@ impl Node { Ok(()) } + /// Configures TLS based on the paths provided in the environment variables `DB_TLS_CERT_PATH` and `DB_TLS_KEY_PATH`. + /// If the paths are not provided, the default certificate and key are taken from `./test/tls/db.crt` and `./test/tls/db.key`. + pub(crate) async fn configure_tls(&self) -> Result<(), Error> { + let args = [ + ("client_encryption_options.enabled", "true"), + ("client_encryption_options.certificate", &DB_TLS_CERT_PATH), + ("client_encryption_options.keyfile", &DB_TLS_KEY_PATH), + ]; + + self.updateconf(args).await + } + /// This method starts the node. User can provide optional [`NodeStartOptions`] to control the behavior of the node start. /// If `None` is provided, the default options are used (see the implementation of Default for [`NodeStartOptions`]). pub(crate) async fn start(&mut self, opts: Option<NodeStartOptions>) -> Result<(), Error> { @@ -679,6 +692,18 @@ impl Cluster { Ok(()) } + /// Configures TLS based on the paths provided in the environment variables `DB_TLS_CERT_PATH` and `DB_TLS_KEY_PATH`. + /// If the paths are not provided, the default certificate and key are taken from `./test/tls/db.crt` and `./test/tls/db.key`. 
+ pub(crate) async fn configure_tls(&self) -> Result<(), Error> { + let args = [ + ("client_encryption_options.enabled", "true"), + ("client_encryption_options.certificate", &DB_TLS_CERT_PATH), + ("client_encryption_options.keyfile", &DB_TLS_KEY_PATH), + ]; + + self.updateconf(args).await + } + fn get_ccm_env(&self) -> HashMap<String, String> { let mut env: HashMap<String, String> = HashMap::new(); env.insert( diff --git a/scylla/tests/ccm_integration/ccm/mod.rs b/scylla/tests/ccm_integration/ccm/mod.rs index 95a7195025..ca1d6ad33a 100644 --- a/scylla/tests/ccm_integration/ccm/mod.rs +++ b/scylla/tests/ccm_integration/ccm/mod.rs @@ -55,6 +55,73 @@ static ROOT_CCM_DIR: LazyLock<String> = LazyLock::new(|| { ccm_root_dir }); +pub(crate) static DB_TLS_CERT_PATH: LazyLock<String> = LazyLock::new(|| { + let cargo_manifest_dir = env!("CARGO_MANIFEST_DIR"); + let db_cert_path_env = std::env::var("DB_TLS_CERT_PATH"); + let db_cert_path = match db_cert_path_env { + Ok(x) => x, + Err(e) => { + info!( + "DB_TLS_CERT_PATH env malformed or not present: {}. Using {}/../test/tls/db.crt for db cert.", + e, cargo_manifest_dir + ); + cargo_manifest_dir.to_string() + "/../test/tls/db.crt" + } + }; + + let path = PathBuf::from(&db_cert_path); + if !path.try_exists().unwrap() { + panic!("DB cert file {:?} not found", path); + } + + db_cert_path +}); + +pub(crate) static DB_TLS_KEY_PATH: LazyLock<String> = LazyLock::new(|| { + let cargo_manifest_dir = env!("CARGO_MANIFEST_DIR"); + let db_key_path_env = std::env::var("DB_TLS_KEY_PATH"); + let db_key_path = match db_key_path_env { + Ok(x) => x, + Err(e) => { + info!( + "DB_TLS_KEY_PATH env malformed or not present: {}. Using {}/../test/tls/db.key for db key.", + e, cargo_manifest_dir + ); + cargo_manifest_dir.to_string() + "/../test/tls/db.key" + } + }; + + let path = PathBuf::from(&db_key_path); + if !path.try_exists().unwrap() { + panic!("DB key file {:?} not found", path); + } + + db_key_path +}); + +#[cfg(feature = "ssl")] +pub(crate) static CA_TLS_CERT_PATH: LazyLock<String> = LazyLock::new(|| { + let cargo_manifest_dir = env!("CARGO_MANIFEST_DIR"); + let ca_cert_path_env = std::env::var("CA_TLS_CERT_PATH"); + let ca_cert_path = match ca_cert_path_env { + Ok(x) => x, + Err(e) => { + info!( + "CA_TLS_CERT_PATH env malformed or not present: {}. 
Using {}/../test/tls/ca.crt for ca cert.", + e, cargo_manifest_dir + ); + cargo_manifest_dir.to_string() + "/../test/tls/ca.crt" + } + }; + + let path = PathBuf::from(&ca_cert_path); + if !path.try_exists().unwrap() { + panic!("CA cert file {:?} not found", path); + } + + ca_cert_path +}); + pub(crate) async fn run_ccm_test<C, T, Fut>(make_cluster_options: C, test_body: T) where C: FnOnce() -> ClusterOptions, diff --git a/scylla/tests/ccm_integration/main.rs b/scylla/tests/ccm_integration/main.rs index 0139d795d0..d74298f762 100644 --- a/scylla/tests/ccm_integration/main.rs +++ b/scylla/tests/ccm_integration/main.rs @@ -3,3 +3,5 @@ mod common; pub(crate) mod ccm; mod test_example; +#[cfg(feature = "ssl")] +mod tls; diff --git a/scylla/tests/ccm_integration/tls.rs b/scylla/tests/ccm_integration/tls.rs new file mode 100644 index 0000000000..844518e488 --- /dev/null +++ b/scylla/tests/ccm_integration/tls.rs @@ -0,0 +1,70 @@ +use std::path::PathBuf; +use std::sync::Arc; + +use openssl::ssl::{SslContextBuilder, SslMethod, SslVerifyMode}; +use tokio::sync::Mutex; + +use crate::ccm::cluster::{Cluster, ClusterOptions}; +use crate::ccm::{run_ccm_test_with_configuration, CA_TLS_CERT_PATH, CLUSTER_VERSION}; +use crate::common::utils::setup_tracing; + +fn cluster_1_node() -> ClusterOptions { + ClusterOptions { + name: "cluster_1_node".to_string(), + version: CLUSTER_VERSION.clone(), + nodes: vec![1], + ..ClusterOptions::default() + } +} + +#[tokio::test] +#[cfg_attr(not(ccm_tests), ignore)] +async fn test_tls_cluster_one_node_connects() { + setup_tracing(); + async fn test(cluster: Arc<Mutex<Cluster>>) { + let mut context_builder = + SslContextBuilder::new(SslMethod::tls()).expect("Failed to create ssl context builder"); + let ca_dir = tokio::fs::canonicalize(PathBuf::from(&*CA_TLS_CERT_PATH)) + .await + .expect("Failed to find ca cert file"); + context_builder + .set_ca_file(ca_dir.as_path()) + .expect("Failed to set ca file"); + context_builder.set_verify(SslVerifyMode::PEER); + + let cluster = cluster.lock().await; + let session = cluster + .make_session_builder() + .await + .ssl_context(Some(context_builder.build())) + .build() + .await + .unwrap(); + + let rows = session + .query_unpaged("select data_center from system.local", &[]) + .await + .expect("failed to execute query") + .into_rows_result() + .expect("failed to get rows") + .rows::<(String,)>() + .expect("failed to deserialize rows") + .map(|res| res.map(|row| row.0)) + .collect::<Result<Vec<_>, _>>() + .unwrap(); + println!("{:?}", rows); + } + + run_ccm_test_with_configuration( + cluster_1_node, + |cluster| async move { + cluster + .configure_tls() + .await + .expect("failed to configure tls"); + cluster + }, + test, + ) + .await; +} From 3bc936266bb13a0c57583a6475c87230a4481fb6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miko=C5=82aj=20Uzarski?= Date: Wed, 12 Feb 2025 13:39:59 +0100 Subject: [PATCH 4/4] ci: remove SSL workflow and dockerfiles related to it I also added a cargo check with the "ssl" feature enabled to the rust.yml workflow. Originally, the check was done in tls.yml (which is now removed).
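Note: the new tls.rs test exercises only the cluster-wide `Cluster::configure_tls()`. The per-node variant added in this series would be used along the same lines; the sketch below is illustrative only (it assumes a `&Node` is already at hand, and the port override is just an example, not something these patches set):

```rust
use crate::ccm::cluster::Node;

// Illustrative sketch, not part of the diffs below.
async fn enable_tls_on_node(node: &Node) -> Result<(), anyhow::Error> {
    // configure_tls() resolves DB_TLS_CERT_PATH / DB_TLS_KEY_PATH (or the
    // ./test/tls defaults) and forwards them to `ccm updateconf` for this node only.
    node.configure_tls().await?;
    // Any other per-node override goes through the same key:value mechanism.
    node.updateconf([("native_transport_port_ssl", "9142")]).await
}
```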
--- .github/workflows/rust.yml | 2 + .github/workflows/tls.yml | 44 --- test/tls/Dockerfile | 3 - test/tls/docker-compose-tls.yml | 28 -- test/tls/scylla.yaml | 626 -------------------------------- 5 files changed, 2 insertions(+), 701 deletions(-) delete mode 100644 .github/workflows/tls.yml delete mode 100644 test/tls/Dockerfile delete mode 100644 test/tls/docker-compose-tls.yml delete mode 100644 test/tls/scylla.yaml diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 51aad3c6d2..f3efba883a 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -47,6 +47,8 @@ jobs: run: cargo check --all-targets --manifest-path "scylla/Cargo.toml" --features "full-serialization" - name: Cargo check with all features run: cargo check --all-targets --manifest-path "scylla/Cargo.toml" --all-features + - name: Cargo check with ssl feature + run: cargo check --all-targets --manifest-path "scylla/Cargo.toml" --features "ssl" - name: Cargo check with secrecy-08 feature run: cargo check --all-targets --manifest-path "scylla/Cargo.toml" --features "secrecy-08" - name: Cargo check with chrono-04 feature diff --git a/.github/workflows/tls.yml b/.github/workflows/tls.yml deleted file mode 100644 index e588a1ed2d..0000000000 --- a/.github/workflows/tls.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: SSL - -on: - push: - branches: - - main - - 'branch-*' - pull_request: - branches: - - '**' - -env: - CARGO_TERM_COLOR: always - RUST_BACKTRACE: full - -jobs: - tls: - runs-on: ubuntu-latest - timeout-minutes: 60 - env: - working-directory: ./scylla - steps: - - uses: actions/checkout@v3 - - - name: Update rust toolchain - run: rustup update - - - name: Check - run: cargo check --verbose --features "ssl" - working-directory: ${{env.working-directory}} - - - name: Start the cluster - run: docker compose -f test/tls/docker-compose-tls.yml up -d - - - name: Run tests - run: SCYLLA_URI=172.44.0.2 RUST_LOG=trace cargo run --example tls - - - name: Stop the cluster - if: ${{ always() }} - run: docker compose -f test/tls/docker-compose-tls.yml stop - - - name: Print the cluster logs - if: ${{ always() }} - run: docker compose -f test/tls/docker-compose-tls.yml logs diff --git a/test/tls/Dockerfile b/test/tls/Dockerfile deleted file mode 100644 index d72aeef0ff..0000000000 --- a/test/tls/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM scylladb/scylla - -COPY db.key db.crt scylla.yaml /etc/scylla/ diff --git a/test/tls/docker-compose-tls.yml b/test/tls/docker-compose-tls.yml deleted file mode 100644 index 13c855dfd1..0000000000 --- a/test/tls/docker-compose-tls.yml +++ /dev/null @@ -1,28 +0,0 @@ -networks: - public: - name: scylla_rust_driver_tls_public - driver: bridge - ipam: - driver: default - config: - - subnet: 172.44.0.0/24 - -services: - scylla: - networks: - public: - ipv4_address: 172.44.0.2 - build: . - command: | - --skip-wait-for-gossip-to-settle 0 - --ring-delay-ms 0 - --smp 1 - --memory 512M - ports: - - "9042:9042" - - "9142:9142" - healthcheck: - test: [ "CMD", "cqlsh", "scylla", "-e", "select * from system.local" ] - interval: 5s - timeout: 5s - retries: 60 diff --git a/test/tls/scylla.yaml b/test/tls/scylla.yaml deleted file mode 100644 index 2fed5732fe..0000000000 --- a/test/tls/scylla.yaml +++ /dev/null @@ -1,626 +0,0 @@ -# Scylla storage config YAML - -####################################### -# This file is split to two sections: -# 1. Supported parameters -# 2. Unsupported parameters: reserved for future use or backwards -# compatibility. 
-# Scylla will only read and use the first segment -####################################### - -### Supported Parameters - -# The name of the cluster. This is mainly used to prevent machines in -# one logical cluster from joining another. -# It is recommended to change the default value when creating a new cluster. -# You can NOT modify this value for an existing cluster -#cluster_name: 'Test Cluster' - -# This defines the number of tokens randomly assigned to this node on the ring -# The more tokens, relative to other nodes, the larger the proportion of data -# that this node will store. You probably want all nodes to have the same number -# of tokens assuming they have equal hardware capability. -num_tokens: 256 - -# Directory where Scylla should store all its files, which are commitlog, -# data, hints, view_hints and saved_caches subdirectories. All of these -# subs can be overridden by the respective options below. -# If unset, the value defaults to /var/lib/scylla -# workdir: /var/lib/scylla - -# Directory where Scylla should store data on disk. -# data_file_directories: -# - /var/lib/scylla/data - -# commit log. when running on magnetic HDD, this should be a -# separate spindle than the data directories. -# commitlog_directory: /var/lib/scylla/commitlog - -# schema commit log. A special commitlog instance -# used for schema and system tables. -# When running on magnetic HDD, this should be a -# separate spindle than the data directories. -# schema_commitlog_directory: /var/lib/scylla/commitlog/schema - -# commitlog_sync may be either "periodic" or "batch." -# -# When in batch mode, Scylla won't ack writes until the commit log -# has been fsynced to disk. It will wait -# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. -# This window should be kept short because the writer threads will -# be unable to do extra work while waiting. (You may need to increase -# concurrent_writes for the same reason.) -# -# commitlog_sync: batch -# commitlog_sync_batch_window_in_ms: 2 -# -# the other option is "periodic" where writes may be acked immediately -# and the CommitLog is simply synced every commitlog_sync_period_in_ms -# milliseconds. -commitlog_sync: periodic -commitlog_sync_period_in_ms: 10000 - -# The size of the individual commitlog file segments. A commitlog -# segment may be archived, deleted, or recycled once all the data -# in it (potentially from each columnfamily in the system) has been -# flushed to sstables. -# -# The default size is 32, which is almost always fine, but if you are -# archiving commitlog segments (see commitlog_archiving.properties), -# then you probably want a finer granularity of archiving; 8 or 16 MB -# is reasonable. -commitlog_segment_size_in_mb: 32 - -# The size of the individual schema commitlog file segments. -# -# The default size is 128, which is 4 times larger than the default -# size of the data commitlog. It's because the segment size puts -# a limit on the mutation size that can be written at once, and some -# schema mutation writes are much larger than average. -schema_commitlog_segment_size_in_mb: 128 - -# seed_provider class_name is saved for future use. -# A seed address is mandatory. -seed_provider: - # The addresses of hosts that will serve as contact points for the joining node. - # It allows the node to discover the cluster ring topology on startup (when - # joining the cluster). - # Once the node has joined the cluster, the seed list has no function. 
- - class_name: org.apache.cassandra.locator.SimpleSeedProvider - parameters: - # In a new cluster, provide the address of the first node. - # In an existing cluster, specify the address of at least one existing node. - # If you specify addresses of more than one node, use a comma to separate them. - # For example: ",," - - seeds: "127.0.0.1" - -# Address to bind to and tell other Scylla nodes to connect to. -# You _must_ change this if you want multiple nodes to be able to communicate! -# -# If you leave broadcast_address (below) empty, then setting listen_address -# to 0.0.0.0 is wrong as other nodes will not know how to reach this node. -# If you set broadcast_address, then you can set listen_address to 0.0.0.0. -listen_address: localhost - -# Address to broadcast to other Scylla nodes -# Leaving this blank will set it to the same value as listen_address -# broadcast_address: 1.2.3.4 - - -# When using multiple physical network interfaces, set this to true to listen on broadcast_address -# in addition to the listen_address, allowing nodes to communicate in both interfaces. -# Ignore this property if the network configuration automatically routes between the public and private networks such as EC2. -# -# listen_on_broadcast_address: false - -# port for the CQL native transport to listen for clients on -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -# To disable the CQL native transport, remove this option and configure native_transport_port_ssl. -native_transport_port: 9042 - -# Like native_transport_port, but clients are forwarded to specific shards, based on the -# client-side port numbers. -native_shard_aware_transport_port: 19042 - -# Enabling native transport encryption in client_encryption_options allows you to either use -# encryption for the standard port or to use a dedicated, additional port along with the unencrypted -# standard native_transport_port. -# Enabling client encryption and keeping native_transport_port_ssl disabled will use encryption -# for native_transport_port. Setting native_transport_port_ssl to a different value -# from native_transport_port will use encryption for native_transport_port_ssl while -# keeping native_transport_port unencrypted. -#native_transport_port_ssl: 9142 - -# Like native_transport_port_ssl, but clients are forwarded to specific shards, based on the -# client-side port numbers. -#native_shard_aware_transport_port_ssl: 19142 - -# How long the coordinator should wait for read operations to complete -read_request_timeout_in_ms: 5000 - -# How long the coordinator should wait for writes to complete -write_request_timeout_in_ms: 2000 -# how long a coordinator should continue to retry a CAS operation -# that contends with other proposals for the same row -cas_contention_timeout_in_ms: 1000 - -# phi value that must be reached for a host to be marked down. -# most users should never need to adjust this. -# phi_convict_threshold: 8 - -# IEndpointSnitch. The snitch has two functions: -# - it teaches Scylla enough about your network topology to route -# requests efficiently -# - it allows Scylla to spread replicas around your cluster to avoid -# correlated failures. It does this by grouping machines into -# "datacenters" and "racks." 
Scylla will do its best not to have -# more than one replica on the same "rack" (which may not actually -# be a physical location) -# -# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER, -# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS -# ARE PLACED. -# -# Out of the box, Scylla provides -# - SimpleSnitch: -# Treats Strategy order as proximity. This can improve cache -# locality when disabling read repair. Only appropriate for -# single-datacenter deployments. -# - GossipingPropertyFileSnitch -# This should be your go-to snitch for production use. The rack -# and datacenter for the local node are defined in -# cassandra-rackdc.properties and propagated to other nodes via -# gossip. If cassandra-topology.properties exists, it is used as a -# fallback, allowing migration from the PropertyFileSnitch. -# - PropertyFileSnitch: -# Proximity is determined by rack and data center, which are -# explicitly configured in cassandra-topology.properties. -# - Ec2Snitch: -# Appropriate for EC2 deployments in a single Region. Loads Region -# and Availability Zone information from the EC2 API. The Region is -# treated as the datacenter, and the Availability Zone as the rack. -# Only private IPs are used, so this will not work across multiple -# Regions. -# - Ec2MultiRegionSnitch: -# Uses public IPs as broadcast_address to allow cross-region -# connectivity. (Thus, you should set seed addresses to the public -# IP as well.) You will need to open the storage_port or -# ssl_storage_port on the public IP firewall. (For intra-Region -# traffic, Scylla will switch to the private IP after -# establishing a connection.) -# - RackInferringSnitch: -# Proximity is determined by rack and data center, which are -# assumed to correspond to the 3rd and 2nd octet of each node's IP -# address, respectively. Unless this happens to match your -# deployment conventions, this is best used as an example of -# writing a custom Snitch class and is provided in that spirit. -# -# You can use a custom Snitch by setting this to the full class name -# of the snitch, which will be assumed to be on your classpath. -endpoint_snitch: SimpleSnitch - -# The address or interface to bind the native transport server to. -# -# Set rpc_address OR rpc_interface, not both. Interfaces must correspond -# to a single address, IP aliasing is not supported. -# -# Leaving rpc_address blank has the same effect as on listen_address -# (i.e. it will be based on the configured hostname of the node). -# -# Note that unlike listen_address, you can specify 0.0.0.0, but you must also -# set broadcast_rpc_address to a value other than 0.0.0.0. -# -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -# -# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address -# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4 -# address will be used. If true the first ipv6 address will be used. Defaults to false preferring -# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6. -rpc_address: localhost -# rpc_interface: eth1 -# rpc_interface_prefer_ipv6: false - -# port for REST API server -api_port: 10000 - -# IP for the REST API server -api_address: 127.0.0.1 - -# Log WARN on any batch size exceeding this value. 128 kiB per batch by default. -# Caution should be taken on increasing the size of this threshold as it can lead to node instability. 
-batch_size_warn_threshold_in_kb: 128 - -# Fail any multiple-partition batch exceeding this value. 1 MiB (8x warn threshold) by default. -batch_size_fail_threshold_in_kb: 1024 - -# Authentication backend, identifying users -# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthenticator, -# PasswordAuthenticator}. -# -# - AllowAllAuthenticator performs no checks - set it to disable authentication. -# - PasswordAuthenticator relies on username/password pairs to authenticate -# users. It keeps usernames and hashed passwords in system_auth.credentials table. -# Please increase system_auth keyspace replication factor if you use this authenticator. -# - com.scylladb.auth.TransitionalAuthenticator requires username/password pair -# to authenticate in the same manner as PasswordAuthenticator, but improper credentials -# result in being logged in as an anonymous user. Use for upgrading clusters' auth. -# authenticator: AllowAllAuthenticator - -# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions -# Out of the box, Scylla provides org.apache.cassandra.auth.{AllowAllAuthorizer, -# CassandraAuthorizer}. -# -# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. -# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please -# increase system_auth keyspace replication factor if you use this authorizer. -# - com.scylladb.auth.TransitionalAuthorizer wraps around the CassandraAuthorizer, using it for -# authorizing permission management. Otherwise, it allows all. Use for upgrading -# clusters' auth. -# authorizer: AllowAllAuthorizer - -# initial_token allows you to specify tokens manually. While you can use # it with -# vnodes (num_tokens > 1, above) -- in which case you should provide a -# comma-separated list -- it's primarily used when adding nodes # to legacy clusters -# that do not have vnodes enabled. -# initial_token: - -# RPC address to broadcast to drivers and other Scylla nodes. This cannot -# be set to 0.0.0.0. If left blank, this will be set to the value of -# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must -# be set. -# broadcast_rpc_address: 1.2.3.4 - -# Uncomment to enable experimental features -# experimental_features: -# - udf -# - alternator-streams -# - broadcast-tables -# - keyspace-storage-options - -# The directory where hints files are stored if hinted handoff is enabled. -# hints_directory: /var/lib/scylla/hints - -# The directory where hints files are stored for materialized-view updates -# view_hints_directory: /var/lib/scylla/view_hints - -# See https://docs.scylladb.com/architecture/anti-entropy/hinted-handoff -# May either be "true" or "false" to enable globally, or contain a list -# of data centers to enable per-datacenter. -# hinted_handoff_enabled: DC1,DC2 -# hinted_handoff_enabled: true - -# this defines the maximum amount of time a dead host will have hints -# generated. After it has been dead this long, new hints for it will not be -# created until it has been seen alive and gone down again. -# max_hint_window_in_ms: 10800000 # 3 hours - - -# Validity period for permissions cache (fetching permissions can be an -# expensive operation depending on the authorizer, CassandraAuthorizer is -# one example). Defaults to 10000, set to 0 to disable. -# Will be disabled automatically for AllowAllAuthorizer. -# permissions_validity_in_ms: 10000 - -# Refresh interval for permissions cache (if enabled). 
-# After this interval, cache entries become eligible for refresh. Upon next -# access, an async reload is scheduled and the old value returned until it -# completes. If permissions_validity_in_ms is non-zero, then this also must have -# a non-zero value. Defaults to 2000. It's recommended to set this value to -# be at least 3 times smaller than the permissions_validity_in_ms. -# permissions_update_interval_in_ms: 2000 - -# The partitioner is responsible for distributing groups of rows (by -# partition key) across nodes in the cluster. You should leave this -# alone for new clusters. The partitioner can NOT be changed without -# reloading all data, so when upgrading you should set this to the -# same partitioner you were already using. -# -# Murmur3Partitioner is currently the only supported partitioner, -# -partitioner: org.apache.cassandra.dht.Murmur3Partitioner - -# Total space to use for commitlogs. -# -# If space gets above this value (it will round up to the next nearest -# segment multiple), Scylla will flush every dirty CF in the oldest -# segment and remove it. So a small total commitlog space will tend -# to cause more flush activity on less-active columnfamilies. -# -# A value of -1 (default) will automatically equate it to the total amount of memory -# available for Scylla. -commitlog_total_space_in_mb: -1 - -# TCP port, for commands and data -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -# storage_port: 7000 - -# SSL port, for encrypted communication. Unused unless enabled in -# encryption_options -# For security reasons, you should not expose this port to the internet. Firewall it if needed. -# ssl_storage_port: 7001 - -# listen_interface: eth0 -# listen_interface_prefer_ipv6: false - -# Whether to start the native transport server. -# Please note that the address on which the native transport is bound is the -# same as the rpc_address. The port however is different and specified below. -# start_native_transport: true - -# The maximum size of allowed frame. Frame (requests) larger than this will -# be rejected as invalid. The default is 256MB. -# native_transport_max_frame_size_in_mb: 256 - -# enable or disable keepalive on rpc/native connections -# rpc_keepalive: true - -# Set to true to have Scylla create a hard link to each sstable -# flushed or streamed locally in a backups/ subdirectory of the -# keyspace data. Removing these links is the operator's -# responsibility. -# incremental_backups: false - -# Whether or not to take a snapshot before each compaction. Be -# careful using this option, since Scylla won't clean up the -# snapshots for you. Mostly useful if you're paranoid when there -# is a data format change. -# snapshot_before_compaction: false - -# Whether or not a snapshot is taken of the data before keyspace truncation -# or dropping of column families. The STRONGLY advised default of true -# should be used to provide data safety. If you set this flag to false, you will -# lose data on truncation or drop. -# auto_snapshot: true - -# When executing a scan, within or across a partition, we need to keep the -# tombstones seen in memory so we can return them to the coordinator, which -# will use them to make sure other replicas also know about the deleted rows. -# With workloads that generate a lot of tombstones, this can cause performance -# problems and even exhaust the server heap. 
-# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets) -# Adjust the thresholds here if you understand the dangers and want to -# scan more tombstones anyway. These thresholds may also be adjusted at runtime -# using the StorageService mbean. -# tombstone_warn_threshold: 1000 -# tombstone_failure_threshold: 100000 - -# Granularity of the collation index of rows within a partition. -# Increase if your rows are large, or if you have a very large -# number of rows per partition. The competing goals are these: -# 1) a smaller granularity means more index entries are generated -# and looking up rows within the partition by collation column -# is faster -# 2) but, Scylla will keep the collation index in memory for hot -# rows (as part of the key cache), so a larger granularity means -# you can cache more hot rows -# column_index_size_in_kb: 64 - -# Auto-scaling of the promoted index prevents running out of memory -# when the promoted index grows too large (due to partitions with many rows -# vs. too small column_index_size_in_kb). When the serialized representation -# of the promoted index grows by this threshold, the desired block size -# for this partition (initialized to column_index_size_in_kb) -# is doubled, to decrease the sampling resolution by half. -# -# To disable promoted index auto-scaling, set the threshold to 0. -# column_index_auto_scale_threshold_in_kb: 10240 - -# Log a warning when writing partitions larger than this value -# compaction_large_partition_warning_threshold_mb: 1000 - -# Log a warning when writing rows larger than this value -# compaction_large_row_warning_threshold_mb: 10 - -# Log a warning when writing cells larger than this value -# compaction_large_cell_warning_threshold_mb: 1 - -# Log a warning when row number is larger than this value -# compaction_rows_count_warning_threshold: 100000 - -# Log a warning when writing a collection containing more elements than this value -# compaction_collection_elements_count_warning_threshold: 10000 - -# How long the coordinator should wait for seq or index scans to complete -# range_request_timeout_in_ms: 10000 -# How long the coordinator should wait for writes to complete -# counter_write_request_timeout_in_ms: 5000 -# How long a coordinator should continue to retry a CAS operation -# that contends with other proposals for the same row -# cas_contention_timeout_in_ms: 1000 -# How long the coordinator should wait for truncates to complete -# (This can be much longer, because unless auto_snapshot is disabled -# we need to flush first so we can snapshot before removing the data.) -# truncate_request_timeout_in_ms: 60000 -# The default timeout for other, miscellaneous operations -# request_timeout_in_ms: 10000 - -# Enable or disable inter-node encryption. -# You must also generate keys and provide the appropriate key and trust store locations and passwords. -# -# The available internode options are : all, none, dc, rack -# If set to dc scylla will encrypt the traffic between the DCs -# If set to rack scylla will encrypt the traffic between the racks -# -# SSL/TLS algorithm and ciphers used can be controlled by -# the priority_string parameter. Info on priority string -# syntax and values is available at: -# https://gnutls.org/manual/html_node/Priority-Strings.html -# -# The require_client_auth parameter allows you to -# restrict access to service based on certificate -# validation. Client must provide a certificate -# accepted by the used trust store to connect. 
-# -# server_encryption_options: -# internode_encryption: none -# certificate: conf/scylla.crt -# keyfile: conf/scylla.key -# truststore: -# certficate_revocation_list: -# require_client_auth: False -# priority_string: - -# enable or disable client/server encryption. -# client_encryption_options: -# enabled: false -# certificate: conf/scylla.crt -# keyfile: conf/scylla.key -# truststore: -# certficate_revocation_list: -# require_client_auth: False -# priority_string: - -# internode_compression controls whether traffic between nodes is -# compressed. -# can be: all - all traffic is compressed -# dc - traffic between different datacenters is compressed -# none - nothing is compressed. -# internode_compression: none - -# Enable or disable tcp_nodelay for inter-dc communication. -# Disabling it will result in larger (but fewer) network packets being sent, -# reducing overhead from the TCP protocol itself, at the cost of increasing -# latency if you block for cross-datacenter responses. -# inter_dc_tcp_nodelay: false - -# Relaxation of environment checks. -# -# Scylla places certain requirements on its environment. If these requirements are -# not met, performance and reliability can be degraded. -# -# These requirements include: -# - A filesystem with good support for asynchronous I/O (AIO). Currently, -# this means XFS. -# -# false: strict environment checks are in place; do not start if they are not met. -# true: relaxed environment checks; performance and reliability may degraade. -# -# developer_mode: false - - -# Idle-time background processing -# -# Scylla can perform certain jobs in the background while the system is otherwise idle, -# freeing processor resources when there is other work to be done. -# -# defragment_memory_on_idle: true -# -# prometheus port -# By default, Scylla opens prometheus API port on port 9180 -# setting the port to 0 will disable the prometheus API. -# prometheus_port: 9180 -# -# prometheus address -# Leaving this blank will set it to the same value as listen_address. -# This means that by default, Scylla listens to the prometheus API on the same -# listening address (and therefore network interface) used to listen for -# internal communication. If the monitoring node is not in this internal -# network, you can override prometheus_address explicitly - e.g., setting -# it to 0.0.0.0 to listen on all interfaces. -# prometheus_address: 1.2.3.4 - -# Distribution of data among cores (shards) within a node -# -# Scylla distributes data within a node among shards, using a round-robin -# strategy: -# [shard0] [shard1] ... [shardN-1] [shard0] [shard1] ... [shardN-1] ... -# -# Scylla versions 1.6 and below used just one repetition of the pattern; -# this interfered with data placement among nodes (vnodes). -# -# Scylla versions 1.7 and above use 4096 repetitions of the pattern; this -# provides for better data distribution. -# -# the value below is log (base 2) of the number of repetitions. -# -# Set to 0 to avoid rewriting all data when upgrading from Scylla 1.6 and -# below. -# -# Keep at 12 for new clusters. -murmur3_partitioner_ignore_msb_bits: 12 - -# Use on a new, parallel algorithm for performing aggregate queries. -# Set to `false` to fall-back to the old algorithm. -# enable_parallelized_aggregation: true - -# Time for which task manager task is kept in memory after it completes. -# task_ttl_in_seconds: 0 - -# In materialized views, restrictions are allowed only on the view's primary key columns. 
-# In old versions Scylla mistakenly allowed IS NOT NULL restrictions on columns which were not part -# of the view's primary key. These invalid restrictions were ignored. -# This option controls the behavior when someone tries to create a view with such invalid IS NOT NULL restrictions. -# -# Can be true, false, or warn. -# * `true`: IS NOT NULL is allowed only on the view's primary key columns, -# trying to use it on other columns will cause an error, as it should. -# * `false`: Scylla accepts IS NOT NULL restrictions on regular columns, but they're silently ignored. -# It's useful for backwards compatibility. -# * `warn`: The same as false, but there's a warning about invalid view restrictions. -# -# To preserve backwards compatibility on old clusters, Scylla's default setting is `warn`. -# New clusters have this option set to `true` by scylla.yaml (which overrides the default `warn`) -# to make sure that trying to create an invalid view causes an error. -strict_is_not_null_in_views: true - -# The Unix Domain Socket the node uses for maintenance socket. -# The possible options are: -# * ignore: the node will not open the maintenance socket, -# * workdir: the node will open the maintenance socket on the path /cql.m, -# where is a path defined by the workdir configuration option, -# * : the node will open the maintenance socket on the path . -maintenance_socket: ignore - -# If set to true, configuration parameters defined with LiveUpdate option can be updated in runtime with CQL -# by updating system.config virtual table. If we don't want any configuration parameter to be changed in runtime -# via CQL, this option should be set to false. This parameter doesn't impose any limits on other mechanisms updating -# configuration parameters in runtime, e.g. sending SIGHUP or using API. This option should be set to false -# e.g. for cloud users, for whom scylla's configuration should be changed only by support engineers. -# live_updatable_config_params_changeable_via_cql: true - -# **************** -# * GUARDRAILS * -# **************** - -# Guardrails to warn or fail when Replication Factor is smaller/greater than the threshold. -# Please note that the value of 0 is always allowed, -# which means that having no replication at all, i.e. RF = 0, is always valid. -# A guardrail value smaller than 0, e.g. -1, means that the guardrail is disabled. -# Commenting out a guardrail also means it is disabled. -# minimum_replication_factor_fail_threshold: -1 -# minimum_replication_factor_warn_threshold: 3 -# maximum_replication_factor_warn_threshold: -1 -# maximum_replication_factor_fail_threshold: -1 - -# Guardrails to warn about or disallow creating a keyspace with specific replication strategy. -# Each of these 2 settings is a list storing replication strategies considered harmful. -# The replication strategies to choose from are: -# 1) SimpleStrategy, -# 2) NetworkTopologyStrategy, -# 3) LocalStrategy, -# 4) EverywhereStrategy -# -# replication_strategy_warn_list: -# - SimpleStrategy -# replication_strategy_fail_list: - -# Enables the tablets feature. -# When enabled, newly created keyspaces will have tablets enabled by default. -# That can be explicitly disabled in the CREATE KEYSPACE query -# by using the `tablets = {'enabled': false}` replication option. -# -# When the tablets feature is disabled, there is no way to enable tablets -# per keyspace. -# -# Note that creating keyspaces with tablets enabled is irreversible. 
-# Disabling the tablets feature may impact existing keyspaces that were created with tablets. -# For example, the tablets map would remain "frozen" and will not respond to topology changes -# like adding, removing, or replacing nodes, or to replication factor changes. -enable_tablets: true -api_ui_dir: /opt/scylladb/swagger-ui/dist/ -api_doc_dir: /opt/scylladb/api/api-doc/ - -client_encryption_options: - enabled: true - certificate: /etc/scylla/db.crt - keyfile: /etc/scylla/db.key
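Beyond TLS, the pre-start configuration hook introduced in this series applies to any scylla.yaml key that `ccm updateconf` accepts. Below is a minimal sketch (illustrative only, not part of the patches above) that reuses it to enable password authentication; the test name and the default cassandra/cassandra superuser credentials are assumptions:

```rust
use std::sync::Arc;

use tokio::sync::Mutex;

use crate::ccm::cluster::{Cluster, ClusterOptions};
use crate::ccm::{run_ccm_test_with_configuration, CLUSTER_VERSION};

fn auth_cluster_1_node() -> ClusterOptions {
    ClusterOptions {
        name: "auth_cluster_1_node".to_string(),
        version: CLUSTER_VERSION.clone(),
        nodes: vec![1],
        ..ClusterOptions::default()
    }
}

#[tokio::test]
#[cfg_attr(not(ccm_tests), ignore)]
async fn test_password_authentication_connects() {
    async fn test(cluster: Arc<Mutex<Cluster>>) {
        let cluster = cluster.lock().await;
        // Connect with the default superuser that Scylla creates when
        // PasswordAuthenticator is enabled.
        let _session = cluster
            .make_session_builder()
            .await
            .user("cassandra", "cassandra")
            .build()
            .await
            .expect("failed to connect with password authentication");
    }

    run_ccm_test_with_configuration(
        auth_cluster_1_node,
        |cluster| async move {
            // Applied before the cluster starts, just like configure_tls() in tls.rs.
            cluster
                .updateconf([("authenticator", "PasswordAuthenticator")])
                .await
                .expect("failed to enable password authentication");
            cluster
        },
        test,
    )
    .await;
}
```

As with the TLS test, the option is written via `ccm updateconf` before `cluster.start()` runs, so the nodes come up with it already in effect.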