H-3674: Allow skipping link validation (#5736)
TimDiekmann authored Nov 26, 2024
1 parent fb858eb commit 7d56ea5
Showing 24 changed files with 240 additions and 175 deletions.
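
The change threads a new `PostgresStoreSettings` value through every `PostgresStorePool::new` call site: the CLI subcommands below pass `PostgresStoreSettings::default()`, while the server builds it from a new `--skip-link-validation` flag. The struct's definition is not among the files shown on this page; the following is a minimal sketch inferred from how it is used (a public `validate_links` field and a writable `pool.settings`), so any detail beyond that is an assumption.

// Sketch only: the real definition lives in hash_graph_postgres_store and is
// inferred from the call sites in this diff, not copied from it.
#[derive(Debug, Clone)]
pub struct PostgresStoreSettings {
    /// Whether links are validated when entities are created or updated.
    pub validate_links: bool,
}

impl Default for PostgresStoreSettings {
    fn default() -> Self {
        // Defaulting to `true` keeps the previous behaviour for callers that
        // pass `PostgresStoreSettings::default()`, such as `migrate` below.
        Self {
            validate_links: true,
        }
    }
}
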
21 changes: 13 additions & 8 deletions apps/hash-graph/src/subcommand/migrate.rs
@@ -2,7 +2,7 @@ use clap::Parser;
 use error_stack::{Report, ResultExt as _};
 use hash_graph_authorization::NoAuthorization;
 use hash_graph_postgres_store::store::{
-    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool,
+    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool, PostgresStoreSettings,
 };
 use hash_graph_store::{migration::StoreMigration as _, pool::StorePool as _};
 use tokio_postgres::NoTls;
@@ -20,13 +20,18 @@ pub struct MigrateArgs {
 }
 
 pub async fn migrate(args: MigrateArgs) -> Result<(), Report<GraphError>> {
-    let pool = PostgresStorePool::new(&args.db_info, &args.pool_config, NoTls)
-        .await
-        .change_context(GraphError)
-        .map_err(|report| {
-            tracing::error!(error = ?report, "Failed to connect to database");
-            report
-        })?;
+    let pool = PostgresStorePool::new(
+        &args.db_info,
+        &args.pool_config,
+        NoTls,
+        PostgresStoreSettings::default(),
+    )
+    .await
+    .change_context(GraphError)
+    .map_err(|report| {
+        tracing::error!(error = ?report, "Failed to connect to database");
+        report
+    })?;
 
     pool.acquire(NoAuthorization, None)
         .await
21 changes: 13 additions & 8 deletions apps/hash-graph/src/subcommand/reindex_cache.rs
@@ -2,7 +2,7 @@ use clap::Parser;
 use error_stack::{Report, ResultExt as _, ensure};
 use hash_graph_authorization::NoAuthorization;
 use hash_graph_postgres_store::store::{
-    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool,
+    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool, PostgresStoreSettings,
 };
 use hash_graph_store::{
     data_type::DataTypeStore as _, entity::EntityStore as _, entity_type::EntityTypeStore as _,
@@ -40,13 +40,18 @@ pub struct ReindexOperations {
 }
 
 pub async fn reindex_cache(args: ReindexCacheArgs) -> Result<(), Report<GraphError>> {
-    let pool = PostgresStorePool::new(&args.db_info, &args.pool_config, NoTls)
-        .await
-        .change_context(GraphError)
-        .map_err(|report| {
-            tracing::error!(error = ?report, "Failed to connect to database");
-            report
-        })?;
+    let pool = PostgresStorePool::new(
+        &args.db_info,
+        &args.pool_config,
+        NoTls,
+        PostgresStoreSettings::default(),
+    )
+    .await
+    .change_context(GraphError)
+    .map_err(|report| {
+        tracing::error!(error = ?report, "Failed to connect to database");
+        report
+    })?;
 
     let mut store = pool
         .acquire(NoAuthorization, None)
29 changes: 21 additions & 8 deletions apps/hash-graph/src/subcommand/server.rs
@@ -20,7 +20,7 @@ use hash_graph_authorization::{
     zanzibar::ZanzibarClient,
 };
 use hash_graph_postgres_store::store::{
-    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool,
+    DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool, PostgresStoreSettings,
 };
 use hash_graph_store::pool::StorePool;
 use hash_graph_type_fetcher::FetchingPool;
@@ -178,6 +178,12 @@ pub struct ServerArgs {
     /// The port of the Temporal server.
     #[clap(long, env = "HASH_TEMPORAL_SERVER_PORT", default_value_t = 7233)]
     pub temporal_port: u16,
+
+    /// Skips the validation of links when creating/updating entities.
+    ///
+    /// This should only be used in development environments.
+    #[clap(long)]
+    pub skip_link_validation: bool,
 }
 
 fn server_rpc<S, A>(
@@ -239,13 +245,20 @@ pub async fn server(args: ServerArgs) -> Result<(), Report<GraphError>> {
             .change_context(GraphError);
     }
 
-    let pool = PostgresStorePool::new(&args.db_info, &args.pool_config, NoTls)
-        .await
-        .change_context(GraphError)
-        .map_err(|report| {
-            tracing::error!(error = ?report, "Failed to connect to database");
-            report
-        })?;
+    let pool = PostgresStorePool::new(
+        &args.db_info,
+        &args.pool_config,
+        NoTls,
+        PostgresStoreSettings {
+            validate_links: !args.skip_link_validation,
+        },
+    )
+    .await
+    .change_context(GraphError)
+    .map_err(|report| {
+        tracing::error!(error = ?report, "Failed to connect to database");
+        report
+    })?;
     _ = pool
         .acquire(NoAuthorization, None)
         .await
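
The `--skip-link-validation` flag is derived by clap from the `skip_link_validation` field and is inverted into `validate_links` when the pool is constructed. Below is a self-contained sketch of that wiring; the binary and subcommand names in the comment are assumptions based on the repository layout, and the settings struct is the hypothetical outline sketched above.

// Minimal sketch of the flag-to-setting wiring added to `ServerArgs`;
// requires clap with the `derive` feature.
use clap::Parser;

#[derive(Debug, Parser)]
struct Args {
    /// Skips the validation of links when creating/updating entities.
    #[clap(long)]
    skip_link_validation: bool,
}

fn main() {
    // e.g. `hash-graph server --skip-link-validation` (names assumed).
    let args = Args::parse();
    // The flag is a "skip" switch, so it is negated before reaching the store.
    let validate_links = !args.skip_link_validation;
    println!("validate_links = {validate_links}");
}
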
27 changes: 17 additions & 10 deletions apps/hash-graph/src/subcommand/snapshot.rs
@@ -8,7 +8,7 @@ use hash_graph_authorization::{
 };
 use hash_graph_postgres_store::{
     snapshot::{SnapshotDumpSettings, SnapshotEntry, SnapshotStore},
-    store::{DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool},
+    store::{DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool, PostgresStoreSettings},
 };
 use hash_graph_store::pool::StorePool as _;
 use tokio::io;
@@ -105,13 +105,18 @@ pub struct SnapshotArgs {
 pub async fn snapshot(args: SnapshotArgs) -> Result<(), Report<GraphError>> {
     SnapshotEntry::install_error_stack_hook();
 
-    let pool = PostgresStorePool::new(&args.db_info, &args.pool_config, NoTls)
-        .await
-        .change_context(GraphError)
-        .map_err(|report| {
-            tracing::error!(error = ?report, "Failed to connect to database");
-            report
-        })?;
+    let mut pool = PostgresStorePool::new(
+        &args.db_info,
+        &args.pool_config,
+        NoTls,
+        PostgresStoreSettings::default(),
+    )
+    .await
+    .change_context(GraphError)
+    .map_err(|report| {
+        tracing::error!(error = ?report, "Failed to connect to database");
+        report
+    })?;
 
     let skip_authorization = match &args.command {
         SnapshotCommand::Dump(args) => args.no_relations,
@@ -168,6 +173,8 @@ pub async fn snapshot(args: SnapshotArgs) -> Result<(), Report<GraphError>> {
             tracing::info!("Snapshot dumped successfully");
         }
         SnapshotCommand::Restore(args) => {
+            pool.settings.validate_links = !args.skip_validation;
+
             let read =
                 FramedRead::new(io::BufReader::new(io::stdin()), JsonLinesDecoder::default());
             if let Some(authorization) = authorization {
@@ -180,7 +187,7 @@ pub async fn snapshot(args: SnapshotArgs) -> Result<(), Report<GraphError>> {
                            report
                        })?,
                )
-                .restore_snapshot(read, 10_000, !args.skip_validation)
+                .restore_snapshot(read, 10_000)
                 .await
             } else {
                 SnapshotStore::new(pool.acquire(NoAuthorization, None).await
@@ -189,7 +196,7 @@ pub async fn snapshot(args: SnapshotArgs) -> Result<(), Report<GraphError>> {
                        tracing::error!(error = ?report, "Failed to acquire database connection");
                        report
                    })?)
-                .restore_snapshot(read, 10_000, !args.skip_validation)
+                .restore_snapshot(read, 10_000)
                 .await
             }
             .change_context(GraphError)
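
Net effect for snapshot restores: `--skip-validation` no longer travels as a third `restore_snapshot` argument but flips `pool.settings.validate_links` before the store is acquired. A condensed sketch of the resulting call pattern, taken from the Restore arm above with the error handling and the authorization branch trimmed (not a standalone program):

// Condensed from the diff above; error handling omitted for brevity.
pool.settings.validate_links = !args.skip_validation; // previously the 3rd restore_snapshot argument
let store = pool.acquire(NoAuthorization, None).await?;
SnapshotStore::new(store)
    .restore_snapshot(read, 10_000) // reader and batch size only
    .await?;
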
21 changes: 13 additions & 8 deletions apps/hash-graph/src/subcommand/test_server.rs
@@ -9,7 +9,7 @@ use hash_graph_authorization::{
 };
 use hash_graph_postgres_store::{
     snapshot::SnapshotEntry,
-    store::{DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool},
+    store::{DatabaseConnectionInfo, DatabasePoolConfig, PostgresStorePool, PostgresStoreSettings},
 };
 use reqwest::Client;
 use tokio::{net::TcpListener, time::timeout};
@@ -71,13 +71,18 @@ pub async fn test_server(args: TestServerArgs) -> Result<(), Report<GraphError>>
             .change_context(GraphError);
     }
 
-    let pool = PostgresStorePool::new(&args.db_info, &args.pool_config, NoTls)
-        .await
-        .change_context(GraphError)
-        .map_err(|report| {
-            tracing::error!(error = ?report, "Failed to connect to database");
-            report
-        })?;
+    let pool = PostgresStorePool::new(
+        &args.db_info,
+        &args.pool_config,
+        NoTls,
+        PostgresStoreSettings::default(),
+    )
+    .await
+    .change_context(GraphError)
+    .map_err(|report| {
+        tracing::error!(error = ?report, "Failed to connect to database");
+        report
+    })?;
 
     let mut spicedb_client = SpiceDbOpenApi::new(
         format!("{}:{}", args.spicedb_host, args.spicedb_http_port),
3 changes: 3 additions & 0 deletions libs/@local/graph/api/openapi/openapi.json

(Generated file; diff not rendered.)