diff --git a/LATEST_RELEASE b/LATEST_RELEASE
index c2c0004f..66784322 100644
--- a/LATEST_RELEASE
+++ b/LATEST_RELEASE
@@ -1 +1 @@
-0.3.5
+0.3.8
diff --git a/VERSION b/VERSION
index 449d7e73..940ac09a 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.3.6
+0.3.9
diff --git a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala
index 40f88f0c..ac19790b 100644
--- a/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala
+++ b/apps/app/src/main/scala/org/lfdecentralizedtrust/splice/config/SpliceConfig.scala
@@ -606,6 +606,8 @@ object SpliceConfig {
     deriveReader[MigrateValidatorPartyConfig]
   implicit val validatorConfigReader: ConfigReader[ValidatorAppBackendConfig] =
     deriveReader[ValidatorAppBackendConfig].emap { conf =>
+      val participantIdentifier =
+        ValidatorCantonIdentifierConfig.resolvedNodeIdentifierConfig(conf).participant
       for {
         _ <- Either.cond(
           !conf.svValidator || conf.validatorPartyHint.isEmpty,
@@ -617,6 +619,16 @@
           (),
           ConfigValidationFailed("Validator party hint must be specified for non-SV validators"),
         )
+        _ <- Either.cond(
+          conf.participantBootstrappingDump.forall(
+            _.newParticipantIdentifier == Some(participantIdentifier)
+          ),
+          (),
+          ConfigValidationFailed(
+            s"New participant identifier in bootstrap dump config ${conf.participantBootstrappingDump
+                .map(_.newParticipantIdentifier)} must match participant node identifier $participantIdentifier"
+          ),
+        )
       } yield conf
     }
   implicit val validatorClientConfigReader: ConfigReader[ValidatorAppClientConfig] =
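A note on the pattern used above: the new check runs inside pureconfig's `emap`, so an inconsistent combination fails config decoding up front instead of surfacing at runtime. A minimal, self-contained sketch of the same `emap` + `Either.cond` validation, using hypothetical stand-in types rather than the Splice ones:

    import pureconfig.ConfigReader
    import pureconfig.error.CannotConvert
    import pureconfig.generic.semiauto.deriveReader

    // Stand-ins for ParticipantBootstrapDumpConfig / ValidatorAppBackendConfig.
    final case class DumpConfig(newParticipantIdentifier: Option[String])
    final case class ValidatorConfig(nodeIdentifier: String, dump: Option[DumpConfig])

    object ValidatorConfig {
      implicit val dumpReader: ConfigReader[DumpConfig] = deriveReader[DumpConfig]
      implicit val reader: ConfigReader[ValidatorConfig] =
        deriveReader[ValidatorConfig].emap { conf =>
          // Reject configs where the dump's target identifier disagrees with
          // the node identifier, mirroring the check in the diff above.
          Either.cond(
            conf.dump.forall(_.newParticipantIdentifier.contains(conf.nodeIdentifier)),
            conf,
            CannotConvert(
              conf.toString,
              "ValidatorConfig",
              s"newParticipantIdentifier must match nodeIdentifier ${conf.nodeIdentifier}",
            ),
          )
        }
    }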
$domainId", + synchronizerConfig.mediator.toCantonConfig, + ) + } + def mediatorClient(domainAlias: DomainAlias): MediatorClientReference = { val synchronizerConfig: SvSynchronizerNodeConfig = synchronizerConfigForDomain(domainAlias) new MediatorClientReference( consoleEnvironment, - s"mediator client for $name", + s"mediator client for $name for domain $domainAlias", synchronizerConfig.mediator.toCantonConfig, ) } @@ -482,4 +473,15 @@ class SvAppBackendReference( } synchronizerConfig } + + private def synchronizerConfigForDomain(domainId: DomainId) = { + val synchronizerConfig = config.synchronizerNodes.get(domainId.uid.identifier.str) match { + case Some(synchronizer) => synchronizer + case None => + config.localSynchronizerNode.getOrElse( + throw new RuntimeException("No sequencer admin connection configured for SV App") + ) + } + synchronizerConfig + } } diff --git a/apps/app/src/pack/examples/sv-helm/standalone-validator-values.yaml b/apps/app/src/pack/examples/sv-helm/standalone-validator-values.yaml index 7b46f2d7..0270f35f 100644 --- a/apps/app/src/pack/examples/sv-helm/standalone-validator-values.yaml +++ b/apps/app/src/pack/examples/sv-helm/standalone-validator-values.yaml @@ -9,7 +9,7 @@ onboardingSecretFrom: optional: false # Party ID hint for the validator operator party, should be of format --, # e.g. digitalAsset-finance-1 -validatorPartyHint: "" +validatorPartyHint: "YOUR_VALIDATOR_PARTY_HINT" # MIGRATION_START # Replace MIGRATION_ID with the migration ID of the global synchronizer. @@ -23,17 +23,16 @@ persistence: secretName: postgres-secrets host: postgres -# PARTICIPANT_BOOTSTRAP_DUMP_START # Uncomment the following block if you want to restore from a participant dump -# participantIdentitiesDumpImport: -# secretName: participant-bootstrap-dump -# PARTICIPANT_BOOTSTRAP_DUMP_END +# and recover your balance # PARTICIPANT_BOOTSTRAP_MIGRATE_TO_NEW_PARTICIPANT_START -# Uncomment the following line if you want to migrate the validator party to a new participant -# newParticipantIdentifier: put-some-new-string-never-used-before +# participantIdentitiesDumpImport: +# secretName: participant-bootstrap-dump +# # Make sure to also adjust nodeIdentifier to the same value +# newParticipantIdentifier: put-some-new-string-never-used-before # migrateValidatorParty: true # PARTICIPANT_BOOTSTRAP_MIGRATE_TO_NEW_PARTICIPANT_END -# Replace YOUR_VALIDATOR_NAME with the name you provided for your validator identity. +# Replace YOUR_VALIDATOR_NODE_NAME with the name you provided for your validator identity. # This value will be used for the node identifier of your participant. 
-nodeIdentifier: YOUR_VALIDATOR_NAME
+nodeIdentifier: "YOUR_VALIDATOR_NODE_NAME"
diff --git a/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf b/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf
index 00560b62..c7b70370 100644
--- a/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf
+++ b/apps/app/src/test/resources/include/self-hosted-validator-disable-json-api.conf
@@ -1 +1 @@
-canton.participants.validatorParticipant.http-ledger-api-experimental = null
+canton.participants.validatorParticipant.http-ledger-api = null
diff --git a/apps/app/src/test/resources/include/validator-participant.conf b/apps/app/src/test/resources/include/validator-participant.conf
index b51a37af..d0110ed7 100644
--- a/apps/app/src/test/resources/include/validator-participant.conf
+++ b/apps/app/src/test/resources/include/validator-participant.conf
@@ -32,7 +32,7 @@ canton {
         enable-verbose-hashing = true
       }
     }
-    http-ledger-api-experimental {
+    http-ledger-api {
      server {
        port = 7575
        address = 0.0.0.0
diff --git a/apps/app/src/test/resources/simple-topology-canton-simtime.conf b/apps/app/src/test/resources/simple-topology-canton-simtime.conf
index db29ef3d..21795152 100644
--- a/apps/app/src/test/resources/simple-topology-canton-simtime.conf
+++ b/apps/app/src/test/resources/simple-topology-canton-simtime.conf
@@ -28,21 +28,21 @@
 _aliceParticipant_client {
   admin-api.port = 15502
   ledger-api.port = 15501
   # Unused so just disable it
-  http-ledger-api-experimental = null
+  http-ledger-api = null
 }
 
 _bobParticipant_client {
   admin-api.port = 15602
   ledger-api.port = 15601
   # Unused so just disable it
-  http-ledger-api-experimental = null
+  http-ledger-api = null
 }
 
 _splitwellParticipant_client {
   admin-api.port = 15702
   ledger-api.port = 15701
   # Unused so just disable it
-  http-ledger-api-experimental = null
+  http-ledger-api = null
 }
 
 _sv1Sequencer_client {
diff --git a/apps/app/src/test/resources/simple-topology-canton.conf b/apps/app/src/test/resources/simple-topology-canton.conf
index 7d4dbac0..ae436b98 100644
--- a/apps/app/src/test/resources/simple-topology-canton.conf
+++ b/apps/app/src/test/resources/simple-topology-canton.conf
@@ -26,7 +26,7 @@ _sv4Participant_client {
 _aliceParticipant_client {
   admin-api.port = 5502
   ledger-api.port = 5501
-  http-ledger-api-experimental {
+  http-ledger-api {
     server.port = 6201
     allow-insecure-tokens = true
   }
@@ -35,7 +35,7 @@ _aliceParticipant_client {
 _bobParticipant_client {
   admin-api.port = 5602
   ledger-api.port = 5601
-  http-ledger-api-experimental {
+  http-ledger-api {
     server.port = 6301
     allow-insecure-tokens = true
   }
@@ -44,7 +44,7 @@ _bobParticipant_client {
 _splitwellParticipant_client {
   admin-api.port = 5702
   ledger-api.port = 5701
-  http-ledger-api-experimental {
+  http-ledger-api {
     server.port = 6401
     allow-insecure-tokens = true
   }
diff --git a/apps/app/src/test/resources/unavailable-validator-topology-canton.conf b/apps/app/src/test/resources/unavailable-validator-topology-canton.conf
index 79d7bec9..b24fd359 100644
--- a/apps/app/src/test/resources/unavailable-validator-topology-canton.conf
+++ b/apps/app/src/test/resources/unavailable-validator-topology-canton.conf
@@ -3,7 +3,7 @@ include required("include/participants.conf")
 _validatorParticipant_client {
   admin-api.port = 5902
   ledger-api.port = 5901
-  http-ledger-api-experimental {
+  http-ledger-api {
     server.port = 6901
     allow-insecure-tokens = true
   }
diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/AnsIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/AnsIntegrationTest.scala
index fd65cd35..fcfacd50 100644
--- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/AnsIntegrationTest.scala
+++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/AnsIntegrationTest.scala
@@ -26,6 +26,7 @@ import org.lfdecentralizedtrust.splice.sv.automation.delegatebased.{
 }
 import org.lfdecentralizedtrust.splice.wallet.automation.SubscriptionReadyForPaymentTrigger
 import com.digitalasset.canton.config.NonNegativeFiniteDuration
+import com.digitalasset.canton.console.CommandFailure
 import com.digitalasset.canton.logging.SuppressionRule
 import com.digitalasset.canton.topology.PartyId
 import org.scalatest.Assertion
@@ -156,7 +157,11 @@ class AnsIntegrationTest extends IntegrationTest with WalletTestUtil with Trigge
       requestAndPayForEntry(aliceRefs, testEntryName)
 
       eventually() {
-        val entry = sv1ScanBackend.lookupEntryByName(testEntryName)
+        val entry =
+          try sv1ScanBackend.lookupEntryByName(testEntryName)
+          catch {
+            case e: CommandFailure if e.getMessage contains "Entry with name" => fail(e)
+          }
         entry.name shouldBe testEntryName
         entry.user shouldBe aliceRefs.userParty.toProtoPrimitive
       }
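The `try`/`catch` wrapper above exists because the test's `eventually()` helper retries assertion failures, while a raw console `CommandFailure` would escape the retry loop; converting it with `fail(e)` turns the not-yet-visible entry into a retryable failure. A compact sketch of the pattern with a stand-in exception type (not the Canton classes):

    import org.scalatest.Assertions.{assert, fail}
    import org.scalatest.concurrent.Eventually._

    object RetryPatternSketch {
      final class CommandFailure(msg: String) extends RuntimeException(msg) // stand-in

      private var attempts = 0
      def lookupEntry(name: String): String = {
        attempts += 1
        // Fails the first two times, simulating an entry that is not yet ingested.
        if (attempts < 3) throw new CommandFailure(s"Entry with name $name not found")
        name
      }

      def main(args: Array[String]): Unit =
        eventually {
          val entry =
            try lookupEntry("alice.unverified.tns")
            catch { case e: CommandFailure => fail(e) } // now a TestFailedException, so eventually retries
          assert(entry.nonEmpty)
        }
    }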
diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DockerComposeValidatorFrontendIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DockerComposeValidatorFrontendIntegrationTest.scala
index 0dbe36e3..60bb02af 100644
--- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DockerComposeValidatorFrontendIntegrationTest.scala
+++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/DockerComposeValidatorFrontendIntegrationTest.scala
@@ -250,7 +250,7 @@ class DockerComposeValidatorFrontendIntegrationTest
     }
 
     "docker-compose based validator with auth works" in { _ =>
-      val validatorUserPassword = sys.env(s"VALIDATOR_WEB_UI_PASSWORD")
+      val validatorUserPassword = sys.env(s"COMPOSE_VALIDATOR_WEB_UI_PASSWORD")
 
       withComposeValidator(
         extraClue = "with auth",
@@ -263,13 +263,13 @@
           eventuallySucceeds()(go to s"http://wallet.localhost")
           completeAuth0LoginWithAuthorization(
             "http://wallet.localhost",
-            "admin@validator.com",
+            "admin@compose-validator.com",
             validatorUserPassword,
             () => seleniumText(find(id("logged-in-user"))) should startWith(partyHint),
           )
           completeAuth0LoginWithAuthorization(
             "http://ans.localhost",
-            "admin@validator.com",
+            "admin@compose-validator.com",
             validatorUserPassword,
             () => seleniumText(find(id("logged-in-user"))) should startWith(partyHint),
           )
diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SoftDomainMigrationIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SoftDomainMigrationIntegrationTest.scala
index eaca1dfc..1f439fab 100644
--- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SoftDomainMigrationIntegrationTest.scala
+++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/SoftDomainMigrationIntegrationTest.scala
@@ -324,17 +324,21 @@
     val dsoRules = sv1Backend.getDsoInfo().dsoRules
 
     clue("Bootstrap new domain") {
-      clue("Sign bootstrapping state") {
-        val signed = env.svs.local.map { sv =>
-          Future { sv.signSynchronizerBootstrappingState(prefix) }
+      clue("Wait for signed topology state to appear") {
+        env.svs.local.map { sv =>
+          eventually() {
+            sv.participantClient.topology.domain_parameters
+              .list(filterDomain = "global-domain-new") should not be empty
+          }
         }
-        signed.foreach(_.futureValue)
       }
-      clue("Initialize synchronizer nodes") {
-        val initialized = env.svs.local.map { sv =>
-          Future { sv.initializeSynchronizer(prefix) }
+      clue("Wait for synchronizer to be initialized") {
+        env.svs.local.map { sv =>
+          eventually() {
+            sv.sequencerClient(newDomainId).health.status.isActive shouldBe Some(true)
+            sv.mediatorClient(newDomainId).health.status.isActive shouldBe Some(true)
+          }
         }
-        initialized.foreach(_.futureValue)
       }
       clue("New synchronizer is registered in DsoRules config") {
         val (_, dsoRulesVoteRequest) = actAndCheck(
diff --git a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorReonboardingIntegrationTest.scala b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorReonboardingIntegrationTest.scala
index 04621302..3c77f296 100644
--- a/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorReonboardingIntegrationTest.scala
+++ b/apps/app/src/test/scala/org/lfdecentralizedtrust/splice/integration/tests/ValidatorReonboardingIntegrationTest.scala
@@ -53,6 +53,12 @@ class ValidatorReonboardingIntegrationTest
 
   val dumpPath = Files.createTempFile("participant-dump", ".json")
 
+  val aliceValidatorLocalRestartName = "aliceValidatorLocalRestart"
+
+  private def aliceValidatorLocalRestart(implicit env: SpliceTestConsoleEnvironment) = v(
+    aliceValidatorLocalRestartName
+  )
+
   private def aliceValidatorLocalWalletClient(implicit env: SpliceTestConsoleEnvironment) =
     wc("aliceValidatorWalletLocal")
 
@@ -97,25 +103,33 @@
             ),
           ),
         )
+      val aliceValidatorConfigNewBase = aliceValidatorConfig
+        .copy(
+          adminApi =
+            aliceValidatorConfig.adminApi.copy(internalPort = Some(Port.tryCreate(27603))),
+          storage = aliceValidatorConfig.storage match {
+            case c: SpliceDbConfig.Postgres =>
+              c.copy(
+                config = c.config
+                  .withValue(
+                    "properties.databaseName",
+                    ConfigValueFactory.fromAnyRef("splice_apps_reonboard"),
+                  )
+              )
+            case _ => throw new IllegalArgumentException("Only Postgres is supported")
+          },
+          cantonIdentifierConfig = Some(
+            ValidatorCantonIdentifierConfig(
+              participant = "aliceValidatorLocalNewForValidatorReonboardingIT"
+            )
+          ),
+        )
       config.copy(
         validatorApps = config.validatorApps +
           (InstanceName.tryCreate("aliceValidator") -> aliceValidatorConfig) +
           (InstanceName.tryCreate("aliceValidatorLocal") -> {
-            aliceValidatorConfig
+            aliceValidatorConfigNewBase
               .copy(
-                adminApi =
-                  aliceValidatorConfig.adminApi.copy(internalPort = Some(Port.tryCreate(27603))),
-                storage = aliceValidatorConfig.storage match {
-                  case c: SpliceDbConfig.Postgres =>
-                    c.copy(
-                      config = c.config
-                        .withValue(
-                          "properties.databaseName",
-                          ConfigValueFactory.fromAnyRef("splice_apps_reonboard"),
-                        )
-                    )
-                  case _ => throw new IllegalArgumentException("Only Postgres is supported")
-                },
                 participantBootstrappingDump = Some(
                   ParticipantBootstrapDumpConfig
                     .File(
@@ -134,7 +148,8 @@
                 )
               ),
             )
-          }),
+          }) +
+          (InstanceName.tryCreate(aliceValidatorLocalRestartName) -> aliceValidatorConfigNewBase),
         walletAppClients = config.walletAppClients + (
           InstanceName.tryCreate("aliceValidatorWalletLocal") -> {
             val aliceValidatorWalletConfig =
@@ -313,6 +328,11 @@ class ValidatorReonboardingIntegrationTest
           .loneElement
           .effectiveAmount shouldBe lockedAmount
       }
+
+      clue("Restart validator without migration config") {
+        aliceValidatorLocalBackend.stop()
+        aliceValidatorLocalRestart.startSync()
+      }
     }
   }
 }
diff --git a/apps/common/frontend/src/components/Header.tsx b/apps/common/frontend/src/components/Header.tsx
index 6a584a41..2623dac7 100644
--- a/apps/common/frontend/src/components/Header.tsx
+++ b/apps/common/frontend/src/components/Header.tsx
@@ -4,13 +4,14 @@ import * as React from 'react';
 import { Fragment } from 'react';
 import { NavLink } from 'react-router-dom';
 
+import { WarningAmberRounded } from '@mui/icons-material';
 import { Badge, Stack, Toolbar } from '@mui/material';
 import Typography, { TypographyOwnProps } from '@mui/material/Typography';
 
 interface HeaderProps extends React.PropsWithChildren {
   title: string;
   titleVariant?: TypographyOwnProps['variant'];
-  navLinks?: { name: string; path: string; badgeCount?: number }[];
+  navLinks?: { name: string; path: string; badgeCount?: number; hasAlert?: boolean }[];
   noBorder?: boolean;
 }
 
@@ -58,12 +59,23 @@ const Header: React.FC<HeaderProps> = ({ children, title, titleVariant, navLink
               >
                 {navLink.name}
-                <Badge badgeContent={navLink.badgeCount} color="error" sx={{ ml: 2 }} />
+                {navLink.badgeCount ? (
+                  <Badge badgeContent={navLink.badgeCount} color="error" sx={{ ml: 2 }} />
+                ) : navLink.hasAlert ? (
+                  <Badge
+                    color="error"
+                    sx={{ ml: 2 }}
+                    badgeContent={
+                      <WarningAmberRounded fontSize="small" />
+                    }
+                  />
+                ) : (
+                  <></>
+                )}
           ))}
diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/ParticipantBootstrapDumpConfig.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/ParticipantBootstrapDumpConfig.scala
index 4651625e..31a6cac2 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/ParticipantBootstrapDumpConfig.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/config/ParticipantBootstrapDumpConfig.scala
@@ -7,6 +7,7 @@ import java.nio.file.Path
 
 sealed abstract class ParticipantBootstrapDumpConfig {
   def description: String
+  def newParticipantIdentifier: Option[String]
 }
 
 object ParticipantBootstrapDumpConfig {
diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala
index 0062cee9..2d56cec6 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/AppConnection.scala
@@ -215,7 +215,7 @@ abstract class HttpAppConnection(
       retryProvider.getValueWithRetries(
         RetryFor.WaitingOnInitDependency,
         "app_version",
-        s"app version of ${config.url}",
+        s"app version of ${config.url}${basePath}",
         getHttpAppVersionInfo(),
         logger,
       )
diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala
index b1621441..12fe38f9 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/MediatorAdminConnection.scala
@@ -60,8 +60,7 @@ class MediatorAdminConnection(
       MediatorAdministrationCommands.Initialize(
         domainId,
         SequencerConnections.single(sequencerConnection),
-        // TODO(#10985) Consider enabling this.
-        SequencerConnectionValidation.Disabled,
+        SequencerConnectionValidation.All,
       )
     )
diff --git a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
index 830eedf8..390b2fbe 100644
--- a/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
+++ b/apps/common/src/main/scala/org/lfdecentralizedtrust/splice/environment/ParticipantAdminConnection.scala
@@ -154,8 +154,7 @@ class ParticipantAdminConnection(
       ParticipantAdminCommands.DomainConnectivity.RegisterDomain(
         config,
         handshakeOnly,
-        // TODO(#10985) Consider enabling this
-        SequencerConnectionValidation.Disabled,
+        SequencerConnectionValidation.All,
       )
     )
 
@@ -348,8 +347,7 @@
     runCmd(
       ParticipantAdminCommands.DomainConnectivity.ModifyDomainConnection(
         config,
-        // TODO(#10985) Consider enabling this
-        SequencerConnectionValidation.Disabled,
+        SequencerConnectionValidation.All,
       )
    )
diff --git a/apps/splitwell/frontend/src/App.tsx b/apps/splitwell/frontend/src/App.tsx
index 7bf4d16b..55c129e6 100644
--- a/apps/splitwell/frontend/src/App.tsx
+++ b/apps/splitwell/frontend/src/App.tsx
@@ -65,7 +65,8 @@ const Providers: React.FC = ({ children }) => {
 
           return (is404or409 || isDomainConnectionError) && failureCount < 10;
         },
-        retryDelay: 500,
+        // Exponential backoff up to a maximum of 30 seconds
+        retryDelay: attemptIndex => Math.min(1000 * 1.5 ** attemptIndex, 30000),
       },
     },
   });
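The new retryDelay grows geometrically with a cap instead of the previous constant 500 ms. The same schedule, written out in Scala to make the numbers concrete (attempt 0 waits 1 s, rising by 1.5x per attempt and capping at 30 s):

    // Same schedule as the TypeScript above: 1000 * 1.5^attempt, capped at 30 000 ms.
    def retryDelayMillis(attemptIndex: Int): Long =
      math.min(1000 * math.pow(1.5, attemptIndex.toDouble), 30000).toLong

    // (0 to 9).map(retryDelayMillis)
    //   == Vector(1000, 1500, 2250, 3375, 5062, 7593, 11390, 17085, 25628, 30000)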
diff --git a/apps/sv/frontend/src/components/Layout.tsx b/apps/sv/frontend/src/components/Layout.tsx
index c656f255..2cb8e88d 100644
--- a/apps/sv/frontend/src/components/Layout.tsx
+++ b/apps/sv/frontend/src/components/Layout.tsx
@@ -30,7 +30,7 @@ const Layout: React.FC<LayoutProps> = (props: LayoutProps) => {
     vr => vr.payload.votes.entriesArray().find(e => e[1].sv === svPartyId) === undefined
   );
   const electionContextQuery = useElectionContext();
-  const electionRequests = electionContextQuery?.data?.ranking;
+  const hasElectionRequest = (electionContextQuery?.data?.ranking?.length ?? 0) > 0;
 
   return (
@@ -60,7 +60,7 @@ const Layout: React.FC<LayoutProps> = (props: LayoutProps) => {
           { name: 'Information', path: 'dso' },
           { name: 'Validator Onboarding', path: 'validator-onboarding' },
           { name: `${config.spliceInstanceNames.amuletName} Price`, path: 'amulet-price' },
-          { name: 'Delegate Election', path: 'delegate', badgeCount: electionRequests?.length },
+          { name: 'Delegate Election', path: 'delegate', hasAlert: hasElectionRequest },
           { name: 'Governance', path: 'votes', badgeCount: actionsPending?.length },
         ]}
       >
diff --git a/apps/sv/src/main/openapi/sv-internal.yaml b/apps/sv/src/main/openapi/sv-internal.yaml
index 00ea1bda..e2a68388 100644
--- a/apps/sv/src/main/openapi/sv-internal.yaml
+++ b/apps/sv/src/main/openapi/sv-internal.yaml
@@ -587,38 +587,6 @@ paths:
             schema:
               "$ref": "../../../../common/src/main/openapi/common-internal.yaml#/components/schemas/GetDsoInfoResponse"
 
-  # TODO(#13301) Replace by automation
-  /v0/synchronizer/{domain_id_prefix}/sign_bootstrapping_state:
-    post:
-      tags: [sv]
-      x-jvm-package: sv_soft_domain_migration_poc
-      operationId: "signSynchronizerBootstrappingState"
-      parameters:
-        - name: "domain_id_prefix"
-          in: path
-          required: true
-          schema:
-            type: string
-      responses:
-        "200":
-          description: ok
-
-  # TODO(#13301) Replace by automation
-  /v0/synchronizer/{domain_id_prefix}/initialize:
-    post:
-      tags: [sv]
-      x-jvm-package: sv_soft_domain_migration_poc
-      operationId: "initializeSynchronizer"
-      parameters:
-        - name: "domain_id_prefix"
-          in: path
-          required: true
-          schema:
-            type: string
-      responses:
-        "200":
-          description: ok
-
   # TODO(#13301) Replace by automation
   /v0/synchronizer/{domain_id_prefix}/reconcile-daml-state:
     post:
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
index 748fd5e4..b2c4385d 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/SvApp.scala
@@ -553,7 +553,6 @@ class SvApp(
           Seq(
             new HttpSvSoftDomainMigrationPocHandler(
               dsoAutomation,
-              localSynchronizerNode,
               extraSynchronizerNodes,
               participantAdminConnection,
               config.domainMigrationId,
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvSoftDomainMigrationPocAppClient.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvSoftDomainMigrationPocAppClient.scala
index 8d448d23..e1e27900 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvSoftDomainMigrationPocAppClient.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/api/client/commands/HttpSvSoftDomainMigrationPocAppClient.scala
@@ -37,33 +37,6 @@ object HttpSvSoftDomainMigrationPocAppClient {
     )
   }
 
-  case class SignSynchronizerBootstrappingState(domainIdPrefix: String)
-      extends BaseCommand[http.SignSynchronizerBootstrappingStateResponse, Unit] {
-    override def submitRequest(
-        client: Client,
-        headers: List[HttpHeader],
-    ): EitherT[Future, Either[
-      Throwable,
-      HttpResponse,
-    ], http.SignSynchronizerBootstrappingStateResponse] =
-      client.signSynchronizerBootstrappingState(domainIdPrefix, headers = headers)
-    override def handleOk()(implicit decoder: TemplateJsonDecoder) = {
-      case http.SignSynchronizerBootstrappingStateResponse.OK => Right(())
-    }
-  }
-
-  case class InitializeSynchronizer(domainIdPrefix: String)
-      extends BaseCommand[http.InitializeSynchronizerResponse, Unit] {
-    override def submitRequest(
-        client: Client,
-        headers: List[HttpHeader],
-    ): EitherT[Future, Either[Throwable, HttpResponse], http.InitializeSynchronizerResponse] =
-      client.initializeSynchronizer(domainIdPrefix, headers = headers)
-    override def handleOk()(implicit decoder: TemplateJsonDecoder) = {
-      case http.InitializeSynchronizerResponse.OK => Right(())
-    }
-  }
-
   case class ReconcileSynchronizerDamlState(domainIdPrefix: String)
       extends BaseCommand[http.ReconcileSynchronizerDamlStateResponse, Unit] {
     override def submitRequest(
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvSoftDomainMigrationPocHandler.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvSoftDomainMigrationPocHandler.scala
index 8f74acf6..3ddacbd8 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvSoftDomainMigrationPocHandler.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/admin/http/HttpSvSoftDomainMigrationPocHandler.scala
@@ -4,65 +4,26 @@ package org.lfdecentralizedtrust.splice.sv.admin.http
 
 import org.lfdecentralizedtrust.splice.auth.AuthExtractor.TracedUser
-import org.lfdecentralizedtrust.splice.config.{
-  Thresholds,
-  NetworkAppClientConfig,
-  SharedSpliceAppParameters,
-}
-import org.lfdecentralizedtrust.splice.environment.{
-  ParticipantAdminConnection,
-  RetryFor,
-  RetryProvider,
-}
-import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.TopologyTransactionType
-import org.lfdecentralizedtrust.splice.http.HttpClient
+import org.lfdecentralizedtrust.splice.config.SharedSpliceAppParameters
+import org.lfdecentralizedtrust.splice.environment.{ParticipantAdminConnection, RetryProvider}
 import org.lfdecentralizedtrust.splice.http.v0.sv_soft_domain_migration_poc as v0
 import org.lfdecentralizedtrust.splice.http.v0.sv_soft_domain_migration_poc.SvSoftDomainMigrationPocResource
 import org.lfdecentralizedtrust.splice.store.AppStoreWithIngestion
-import org.lfdecentralizedtrust.splice.scan.admin.api.client.SingleScanConnection
-import org.lfdecentralizedtrust.splice.scan.config.ScanAppClientConfig
-import org.lfdecentralizedtrust.splice.sv.{ExtraSynchronizerNode, LocalSynchronizerNode}
+import org.lfdecentralizedtrust.splice.sv.ExtraSynchronizerNode
 import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore
 import org.lfdecentralizedtrust.splice.sv.onboarding.SynchronizerNodeReconciler
-import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder
-import com.daml.nonempty.NonEmpty
-import com.digitalasset.canton.config.{CommunityCryptoConfig, CommunityCryptoProvider}
-import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
-import com.digitalasset.canton.data.CantonTimestamp
 import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.time.Clock
-import com.digitalasset.canton.topology.{DomainId, ForceFlag, ParticipantId, UniqueIdentifier}
-import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
-import com.digitalasset.canton.topology.store.{
-  StoredTopologyTransaction,
-  StoredTopologyTransactions,
-  TimeQuery,
-  TopologyStoreId,
-}
-import StoredTopologyTransaction.GenericStoredTopologyTransaction
-import com.digitalasset.canton.topology.transaction.{
-  DomainParametersState,
-  MediatorDomainState,
-  SequencerDomainState,
-  TopologyMapping,
-}
-import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
-import com.digitalasset.canton.tracing.{Spanning, TraceContext}
-import io.opentelemetry.api.trace.Tracer
-import cats.syntax.either.*
-import cats.syntax.traverse.*
-import com.digitalasset.canton.version.ProtocolVersion
+import com.digitalasset.canton.topology.{DomainId, ParticipantId, UniqueIdentifier}
+import com.digitalasset.canton.topology.store.{TopologyStoreId}
+import com.digitalasset.canton.tracing.Spanning
 import io.grpc.Status
-import org.apache.pekko.stream.Materializer
-
 import scala.concurrent.{ExecutionContextExecutor, Future}
 import scala.jdk.CollectionConverters.*
-import scala.jdk.OptionConverters.*
 
 // TODO(#13301) Validate that topology reads return the right amount of data
 class HttpSvSoftDomainMigrationPocHandler(
     dsoStoreWithIngestion: AppStoreWithIngestion[SvDsoStore],
-    localSynchronizerNode: Option[LocalSynchronizerNode],
     synchronizerNodes: Map[String, ExtraSynchronizerNode],
     participantAdminConnection: ParticipantAdminConnection,
     migrationId: Long,
@@ -72,360 +33,13 @@ class HttpSvSoftDomainMigrationPocHandler(
     protected val loggerFactory: NamedLoggerFactory,
     val amuletAppParameters: SharedSpliceAppParameters,
 )(implicit
-    ec: ExecutionContextExecutor,
-    mat: Materializer,
-    tracer: Tracer,
-    httpClient: HttpClient,
-    templateJsonDecoder: TemplateJsonDecoder,
+    ec: ExecutionContextExecutor
 ) extends v0.SvSoftDomainMigrationPocHandler[TracedUser]
     with Spanning
     with NamedLogging {
 
-  private val workflowId = this.getClass.getSimpleName
   private val dsoStore = dsoStoreWithIngestion.store
 
-  private def getScanUrls()(implicit tc: TraceContext): Future[Seq[String]] = {
-    for {
-      // TODO(#13301) We should use the internal URL for the SV’s own scan to avoid a loopback requirement
-      dsoRulesWithSvNodeStates <- dsoStore.getDsoRulesWithSvNodeStates()
-    } yield dsoRulesWithSvNodeStates.svNodeStates.values
-      .flatMap(
-        _.payload.state.synchronizerNodes.asScala.values
-          .flatMap(_.scan.toScala.toList.map(_.publicUrl))
-      )
-      .toList
-      // sorted to make it deterministic
-      .sorted
-  }
-
-  private def withScanConnection[T](
-      url: String
-  )(f: SingleScanConnection => Future[T])(implicit tc: TraceContext): Future[T] =
-    SingleScanConnection.withSingleScanConnection(
-      ScanAppClientConfig(
-        NetworkAppClientConfig(
-          url
-        )
-      ),
-      amuletAppParameters.upgradesConfig,
-      clock,
-      retryProvider,
-      loggerFactory,
-    )(f)
-
-  /** Read the existing decentralized namespace definition including all prerequisite key txs from the existing domain store.
-    */
-  private def getDecentralizedNamespaceDefinitionTransactions()(implicit
-      tc: TraceContext
-  ): Future[Seq[GenericSignedTopologyTransaction]] = for {
-    decentralizedSynchronizerId <- dsoStore.getAmuletRulesDomain()(tc)
-    namespaceDefinitions <- participantAdminConnection.listDecentralizedNamespaceDefinition(
-      decentralizedSynchronizerId,
-      decentralizedSynchronizerId.uid.namespace,
-      timeQuery = TimeQuery.Range(None, None),
-    )
-    identityTransactions <- namespaceDefinitions
-      .flatMap(_.mapping.owners)
-      .toSet
-      .toList
-      .traverse { namespace =>
-        participantAdminConnection.listAllTransactions(
-          TopologyStoreId.DomainStore(decentralizedSynchronizerId),
-          TimeQuery.Range(None, None),
-          includeMappings = Set(
-            TopologyMapping.Code.OwnerToKeyMapping,
-            TopologyMapping.Code.NamespaceDelegation,
-          ),
-          filterNamespace = Some(namespace),
-        )
-      }
-      .map(_.flatten)
-    decentralizedNamespaceDefinition <- participantAdminConnection.listAllTransactions(
-      TopologyStoreId.DomainStore(decentralizedSynchronizerId),
-      TimeQuery.Range(None, None),
-      includeMappings = Set(TopologyMapping.Code.DecentralizedNamespaceDefinition),
-    )
-  } yield (identityTransactions ++ decentralizedNamespaceDefinition).map(_.transaction)
-
-  override def signSynchronizerBootstrappingState(
-      respond: SvSoftDomainMigrationPocResource.SignSynchronizerBootstrappingStateResponse.type
-  )(
-      domainIdPrefix: String
-  )(
-      extracted: TracedUser
-  ): Future[SvSoftDomainMigrationPocResource.SignSynchronizerBootstrappingStateResponse] = {
-    implicit val TracedUser(_, traceContext) = extracted
-    withSpan(s"$workflowId.signSynchronizerBootstrappingState") { _ => _ =>
-      for {
-        scanUrls <- getScanUrls()
-        synchronizerIdentities <- scanUrls.traverse { url =>
-          withScanConnection(url)(_.getSynchronizerIdentities(domainIdPrefix))
-        }
-        domainId = DomainId(
-          UniqueIdentifier.tryCreate(
-            domainIdPrefix,
-            dsoStore.key.dsoParty.uid.namespace,
-          )
-        )
-        sequencers = synchronizerIdentities.map(_.sequencerId)
-        mediators = synchronizerIdentities.map(_.mediatorId)
-        existingSynchronizer = localSynchronizerNode.getOrElse(
-          throw Status.INTERNAL.withDescription("Missing synchronizer").asRuntimeException()
-        )
-        decentralizedSynchronizerId <- dsoStore.getAmuletRulesDomain()(traceContext)
-        // for now we just copy the parameters from the existing domain.
-        parameters <- existingSynchronizer.sequencerAdminConnection.getDomainParametersState(
-          decentralizedSynchronizerId
-        )
-        domainParameters = DomainParametersState(
-          domainId,
-          parameters.mapping.parameters,
-        )
-        sequencerDomainState = SequencerDomainState
-          .create(
-            domainId,
-            Thresholds.sequencerConnectionsSizeThreshold(sequencers.size),
-            sequencers,
-            Seq.empty,
-          )
-          .valueOr(err =>
-            throw Status.INTERNAL
-              .withDescription(s"Failed to construct SequencerDomainState: $err")
-              .asRuntimeException
-          )
-        mediatorDomainState = MediatorDomainState
-          .create(
-            domainId,
-            NonNegativeInt.zero,
-            Thresholds.mediatorDomainStateThreshold(mediators.size),
-            mediators,
-            Seq.empty,
-          )
-          .valueOr(err =>
-            throw Status.INTERNAL
-              .withDescription(s"Failed to construct MediatorDomainState: $err")
-              .asRuntimeException
-          )
-        participantId <- participantAdminConnection.getParticipantId()
-        decentralizedNamespaceTxs <- getDecentralizedNamespaceDefinitionTransactions()
-        _ <- participantAdminConnection.addTopologyTransactions(
-          TopologyStoreId.AuthorizedStore,
-          decentralizedNamespaceTxs,
-          ForceFlag.AlienMember,
-        )
-        signedBy = participantId.uid.namespace.fingerprint
-        _ <- retryProvider.ensureThatB(
-          RetryFor.ClientCalls,
-          "domain_parameters",
-          "domain parameters are signed",
-          for {
-            proposalsExist <- participantAdminConnection
-              .listDomainParametersState(
-                TopologyStoreId.AuthorizedStore,
-                domainId,
-                TopologyTransactionType.AllProposals,
-                TimeQuery.HeadState,
-              )
-              .map(_.nonEmpty)
-            authorizedExist <-
-              participantAdminConnection
-                .listDomainParametersState(
-                  TopologyStoreId.AuthorizedStore,
-                  domainId,
-                  TopologyTransactionType.AuthorizedState,
-                  TimeQuery.HeadState,
-                )
-                .map(_.nonEmpty)
-          } yield proposalsExist || authorizedExist,
-          participantAdminConnection
-            .proposeMapping(
-              TopologyStoreId.AuthorizedStore,
-              domainParameters,
-              serial = PositiveInt.one,
-              isProposal = true,
-            )
-            .map(_ => ()),
-          logger,
-        )
-        // add sequencer keys, note that in 3.0 not adding these does not fail but in 3.1 it will
-        _ <- participantAdminConnection.addTopologyTransactions(
-          TopologyStoreId.AuthorizedStore,
-          synchronizerIdentities.flatMap(_.sequencerIdentityTransactions),
-          ForceFlag.AlienMember,
-        )
-        _ <- retryProvider.ensureThatB(
-          RetryFor.ClientCalls,
-          "sequencer_domain_state",
-          "sequencer domain state is signed",
-          for {
-            proposalsExist <- participantAdminConnection
-              .listSequencerDomainState(
-                TopologyStoreId.AuthorizedStore,
-                domainId,
-                TimeQuery.HeadState,
-                true,
-              )
-              .map(_.nonEmpty)
-            authorizedExist <-
-              participantAdminConnection
-                .listSequencerDomainState(
-                  TopologyStoreId.AuthorizedStore,
-                  domainId,
-                  TimeQuery.HeadState,
-                  false,
-                )
-                .map(_.nonEmpty)
-          } yield proposalsExist || authorizedExist,
-          participantAdminConnection
-            .proposeMapping(
-              TopologyStoreId.AuthorizedStore,
-              sequencerDomainState,
-              serial = PositiveInt.one,
-              isProposal = true,
-            )
-            .map(_ => ()),
-          logger,
-        )
-        // add mediator keys, note that in 3.0 not adding these does not fail but in 3.1 it will
-        _ <- participantAdminConnection.addTopologyTransactions(
-          TopologyStoreId.AuthorizedStore,
-          synchronizerIdentities.flatMap(_.mediatorIdentityTransactions),
-          ForceFlag.AlienMember,
-        )
-        _ <- retryProvider.ensureThatB(
-          RetryFor.ClientCalls,
-          "mediator_domain_state",
-          "mediator domain state is signed",
-          for {
-            proposalsExist <- participantAdminConnection
-              .listMediatorDomainState(
-                TopologyStoreId.AuthorizedStore,
-                domainId,
-                true,
-              )
-              .map(_.nonEmpty)
-            authorizedExist <-
-              participantAdminConnection
-                .listMediatorDomainState(
-                  TopologyStoreId.AuthorizedStore,
-                  domainId,
-                  false,
-                )
-                .map(_.nonEmpty)
-          } yield proposalsExist || authorizedExist,
-          participantAdminConnection
-            .proposeMapping(
-              TopologyStoreId.AuthorizedStore,
-              mediatorDomainState,
-              serial = PositiveInt.one,
-              isProposal = true,
-            )
-            .map(_ => ()),
-          logger,
-        )
-      } yield SvSoftDomainMigrationPocResource.SignSynchronizerBootstrappingStateResponse.OK
-    }
-  }
-
-  // Takes a list of (ordered) signed topology transactions and turns them into
-  // StoredTopologyTransactions ensuring that only the latest serial has validUntil = None
-  private def toStoredTopologyBootstrapTransactions(
-      ts: Seq[GenericSignedTopologyTransaction]
-  ): Seq[GenericStoredTopologyTransaction] =
-    ts.foldRight(
-      (Set.empty[TopologyMapping.MappingHash], Seq.empty[GenericStoredTopologyTransaction])
-    ) { case (tx, (newerMappings, acc)) =>
-      (
-        newerMappings + tx.transaction.mapping.uniqueKey,
-        StoredTopologyTransaction(
-          SequencedTime(CantonTimestamp.MinValue.immediateSuccessor),
-          EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor),
-          Option.when(newerMappings.contains(tx.transaction.mapping.uniqueKey))(
-            EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor)
-          ),
-          tx.copy(isProposal = false),
-        ) +: acc,
-      )
-    }._2
-
-  // TODO(#13301) Add safeguards that data written to authorized store is sensible
-  override def initializeSynchronizer(
-      respond: SvSoftDomainMigrationPocResource.InitializeSynchronizerResponse.type
-  )(
-      domainIdPrefix: String
-  )(
-      extracted: TracedUser
-  ): Future[SvSoftDomainMigrationPocResource.InitializeSynchronizerResponse] = {
-    implicit val TracedUser(_, traceContext) = extracted
-    val domainId = DomainId(
-      UniqueIdentifier.tryCreate(
-        domainIdPrefix,
-        dsoStore.key.dsoParty.uid.namespace,
-      )
-    )
-    for {
-      scanUrls <- getScanUrls()
-      decentralizedNamespaceTxs <- getDecentralizedNamespaceDefinitionTransactions()
-      synchronizerIdentities <- scanUrls
-        .traverse { url =>
-          logger.info(s"Querying synchronizer identities from $url")
-          withScanConnection(url)(_.getSynchronizerIdentities(domainIdPrefix))
-        }
-      bootstrappingStates <- scanUrls
-        .traverse { url =>
-          logger.info(s"Querying bootstrapping transactions from $url")
-          withScanConnection(url)(_.getSynchronizerBootstrappingTransactions(domainIdPrefix))
-        }
-        .map(
-          NonEmpty
-            .from(_)
-            .getOrElse(
-              throw Status.INTERNAL.withDescription("Empty list of scan urls").asRuntimeException()
-            )
-        )
-      domainParameters = bootstrappingStates
-        .map(_.domainParameters)
-        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
-      sequencerDomainState = bootstrappingStates
-        .map(_.sequencerDomainState)
-        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
-      mediatorDomainState = bootstrappingStates
-        .map(_.mediatorDomainState)
-        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
-      node = synchronizerNodes(domainIdPrefix)
-      bootstrapTransactions = toStoredTopologyBootstrapTransactions(
-        decentralizedNamespaceTxs ++
-          synchronizerIdentities.flatMap(_.sequencerIdentityTransactions) ++
-          synchronizerIdentities.flatMap(_.mediatorIdentityTransactions) ++
-          Seq(
-            domainParameters,
-            sequencerDomainState,
-            mediatorDomainState,
-          )
-      )
-      staticDomainParameters = node.parameters
-        .toStaticDomainParameters(
-          CommunityCryptoConfig(provider = CommunityCryptoProvider.Jce),
-          ProtocolVersion.v32,
-        )
-        .valueOr(err =>
-          throw new IllegalArgumentException(s"Invalid domain parameters config: $err")
-        )
-      _ = logger.info(s"Initializing sequencer")
-      _ <- node.sequencerAdminConnection.initializeFromBeginning(
-        StoredTopologyTransactions(
-          bootstrapTransactions
-        ),
-        staticDomainParameters,
-      )
-      _ = logger.info(s"Initializing mediator")
-      _ <- node.mediatorAdminConnection.initialize(
-        domainId,
-        LocalSynchronizerNode.toSequencerConnection(node.sequencerPublicApi),
-      )
-    } yield SvSoftDomainMigrationPocResource.InitializeSynchronizerResponse.OK
-  }
-
   override def reconcileSynchronizerDamlState(
       respond: SvSoftDomainMigrationPocResource.ReconcileSynchronizerDamlStateResponse.type
   )(domainIdPrefix: String)(
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala
index 0f1ea59e..02989465 100644
--- a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/SvDsoAutomationService.scala
@@ -19,8 +19,7 @@ import org.lfdecentralizedtrust.splice.codegen.java.splice.round.{
   IssuingMiningRound,
   OpenMiningRound,
 }
-import org.lfdecentralizedtrust.splice.config.UpgradesConfig
-import org.lfdecentralizedtrust.splice.config.SpliceInstanceNamesConfig
+import org.lfdecentralizedtrust.splice.config.{SpliceInstanceNamesConfig, UpgradesConfig}
 import org.lfdecentralizedtrust.splice.environment.*
 import org.lfdecentralizedtrust.splice.http.HttpClient
 import org.lfdecentralizedtrust.splice.store.{
@@ -53,6 +52,7 @@ import com.digitalasset.canton.config.ClientConfig
 import com.digitalasset.canton.logging.NamedLoggerFactory
 import com.digitalasset.canton.time.{Clock, WallClock}
 import com.digitalasset.canton.tracing.TraceContext
+import io.grpc.Status
 import io.opentelemetry.api.trace.Tracer
 import monocle.Monocle.toAppliedFocusOps
 import org.apache.pekko.stream.Materializer
@@ -245,6 +245,48 @@ class SvDsoAutomationService(
           config.mediatorDeduplicationTimeout,
         )
       )
+
+      if (config.supportsSoftDomainMigrationPoc) {
+        registerTrigger(
+          new AmuletConfigReassignmentTrigger(
+            triggerContext,
+            dsoStore,
+            connection,
+            dsoStore.key.dsoParty,
+            Seq[ConstrainedTemplate](
+              AmuletRules.COMPANION,
+              OpenMiningRound.COMPANION,
+              IssuingMiningRound.COMPANION,
+            ),
+            (tc: TraceContext) => dsoStore.lookupAmuletRules()(tc),
+          )
+        )
+
+        registerTrigger(
+          new SignSynchronizerBootstrappingStateTrigger(
+            dsoStore,
+            participantAdminConnection,
+            triggerContext,
+            localSynchronizerNode.getOrElse(
+              throw Status.INTERNAL
+                .withDescription("Soft domain migrations require a configured synchronizer node")
+                .asRuntimeException
+            ),
+            extraSynchronizerNodes,
+            upgradesConfig,
+          )
+        )
+
+        registerTrigger(
+          new InitializeSynchronizerTrigger(
+            dsoStore,
+            participantAdminConnection,
+            triggerContext,
+            extraSynchronizerNodes,
+            upgradesConfig,
+          )
+        )
+      }
     }
 
   def registerTrafficReconciliationTriggers(): Unit = {
@@ -288,22 +330,6 @@ class SvDsoAutomationService(
 
     registerTrigger(restartDsoDelegateBasedAutomationTrigger)
 
-    if (config.supportsSoftDomainMigrationPoc) {
-      registerTrigger(
-        new AmuletConfigReassignmentTrigger(
-          triggerContext,
-          dsoStore,
-          connection,
-          dsoStore.key.dsoParty,
-          Seq[ConstrainedTemplate](
-            AmuletRules.COMPANION,
-            OpenMiningRound.COMPANION,
-            IssuingMiningRound.COMPANION,
-          ),
-          (tc: TraceContext) => dsoStore.lookupAmuletRules()(tc),
-        )
-      )
-    }
     registerTrigger(new AssignTrigger(triggerContext, dsoStore, connection, store.key.dsoParty))
 
     registerTrigger(
       new AnsSubscriptionInitialPaymentTrigger(
@@ -375,7 +401,6 @@ class SvDsoAutomationService(
         )
       )
     }
-  }
 
   private val localSequencerClientContext: Option[LocalSequencerClientContext] =
@@ -501,5 +526,7 @@ object SvDsoAutomationService extends AutomationServiceCompanion {
     aTrigger[ReconcileDynamicDomainParametersTrigger],
     aTrigger[TransferCommandCounterTrigger],
     aTrigger[ExternalPartyAmuletRulesTrigger],
+    aTrigger[SignSynchronizerBootstrappingStateTrigger],
+    aTrigger[InitializeSynchronizerTrigger],
   )
 }
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/InitializeSynchronizerTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/InitializeSynchronizerTrigger.scala
new file mode 100644
index 00000000..a5ed6bfc
--- /dev/null
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/InitializeSynchronizerTrigger.scala
@@ -0,0 +1,239 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package org.lfdecentralizedtrust.splice.sv.automation.singlesv
+
+import cats.syntax.either.*
+import cats.syntax.traverse.*
+import cats.syntax.traverseFilter.*
+import com.daml.nonempty.NonEmpty
+import com.digitalasset.canton.config.{CommunityCryptoConfig, CommunityCryptoProvider}
+import com.digitalasset.canton.data.CantonTimestamp
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.topology.{DomainId, UniqueIdentifier}
+import com.digitalasset.canton.topology.processing.{EffectiveTime, SequencedTime}
+import com.digitalasset.canton.topology.store.{
+  StoredTopologyTransaction,
+  StoredTopologyTransactions,
+}
+import StoredTopologyTransaction.GenericStoredTopologyTransaction
+import com.digitalasset.canton.topology.transaction.TopologyMapping
+import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
+import com.digitalasset.canton.version.ProtocolVersion
+import io.grpc.Status
+import io.opentelemetry.api.trace.Tracer
+import org.apache.pekko.stream.Materializer
+import org.lfdecentralizedtrust.splice.automation.{
+  PollingParallelTaskExecutionTrigger,
+  TaskOutcome,
+  TaskSuccess,
+  TriggerContext,
+}
+import org.lfdecentralizedtrust.splice.config.UpgradesConfig
+import org.lfdecentralizedtrust.splice.environment.{ParticipantAdminConnection, RetryFor}
+import org.lfdecentralizedtrust.splice.http.HttpClient
+import org.lfdecentralizedtrust.splice.scan.admin.api.client.commands.HttpScanSoftDomainMigrationPocAppClient.{
+  SynchronizerBootstrappingTransactions,
+  SynchronizerIdentities,
+}
+import org.lfdecentralizedtrust.splice.sv.{ExtraSynchronizerNode, LocalSynchronizerNode}
+import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore
+import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder
+import scala.concurrent.{ExecutionContextExecutor, Future}
+
+import InitializeSynchronizerTrigger.Task
+
+class InitializeSynchronizerTrigger(
+    override val dsoStore: SvDsoStore,
+    participantAdminConnection: ParticipantAdminConnection,
+    override protected val context: TriggerContext,
+    synchronizerNodes: Map[String, ExtraSynchronizerNode],
+    override val upgradesConfig: UpgradesConfig,
+)(implicit
+    ec: ExecutionContextExecutor,
+    httpClient: HttpClient,
+    mat: Materializer,
+    templateJsonDecoder: TemplateJsonDecoder,
+    tracer: Tracer,
+) extends PollingParallelTaskExecutionTrigger[Task]
+    with SoftMigrationTrigger {
+
+  protected def retrieveTasks()(implicit tc: TraceContext): Future[Seq[Task]] =
+    for {
+      scanUrls <- getScanUrls()
+      // TODO(#17032): Handle failures properly
+      // TODO(#17032): consider whether we only want to initialize the ones in requiredSynchronizers in AmuletConfig
+      synchronizersWithOnlineNodes <- synchronizerNodes.keys.toList.traverseFilter { prefix =>
+        val node = synchronizerNodes(prefix)
+        (for {
+          sequencerInitialized <- node.sequencerAdminConnection.getStatus.map(
+            _.successOption.fold(false)(_.active)
+          )
+          mediatorInitialized <- node.mediatorAdminConnection.getStatus.map(
+            _.successOption.fold(false)(_.active)
+          )
+        } yield Option.when(!(sequencerInitialized && mediatorInitialized))(prefix)).recover {
+          err =>
+            logger.info(s"Failed to query sequencer or mediator for $prefix: $err")
+            None
+        }
+      }
+      tasks <- synchronizersWithOnlineNodes.traverseFilter { prefix =>
+        scanUrls
+          .traverse { url =>
+            withScanConnection(url) { c =>
+              for {
+                identities <- c.getSynchronizerIdentities(prefix)
+                bootstrapTransactions <- c.getSynchronizerBootstrappingTransactions(prefix)
+              } yield (identities, bootstrapTransactions)
+            }
+          }
+          .map(_.unzip)
+          .map { case (identities, bootstrapTransactions) =>
+            Some(
+              Task(
+                DomainId(
+                  UniqueIdentifier.tryCreate(
+                    prefix,
+                    dsoStore.key.dsoParty.uid.namespace,
+                  )
+                ),
+                identities,
+                NonEmpty
+                  .from(bootstrapTransactions)
+                  .getOrElse(
+                    throw Status.INTERNAL
+                      .withDescription("Empty list of scan urls")
+                      .asRuntimeException()
+                  ),
+              )
+            )
+          }
+          .recover { err =>
+            // TODO(#17032) Include the url of the failed SV
+            logger.info(
+              s"Failed to query synchronizer identities for $prefix, not all SVs are ready yet: $err"
+            )
+            None
+          }
+      }
+    } yield tasks
+
+  protected def completeTask(task: Task)(implicit tc: TraceContext): Future[TaskOutcome] = {
+    for {
+      scanUrls <- getScanUrls()
+      decentralizedNamespaceTxs <- getDecentralizedNamespaceDefinitionTransactions(
+        participantAdminConnection
+      )
+      synchronizerIdentities = task.synchronizerIdentities
+      bootstrappingStates = task.bootstrappingTransactions
+      domainParameters = bootstrappingStates
+        .map(_.domainParameters)
+        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
+      sequencerDomainState = bootstrappingStates
+        .map(_.sequencerDomainState)
+        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
+      mediatorDomainState = bootstrappingStates
+        .map(_.mediatorDomainState)
+        .reduceLeft((a, b) => a.addSignatures(b.signatures.toSeq))
+      node = synchronizerNodes(task.synchronizerId.identifier.unwrap)
+      bootstrapTransactions = toStoredTopologyBootstrapTransactions(
+        decentralizedNamespaceTxs ++
+          synchronizerIdentities.flatMap(_.sequencerIdentityTransactions) ++
+          synchronizerIdentities.flatMap(_.mediatorIdentityTransactions) ++
+          Seq(
+            domainParameters,
+            sequencerDomainState,
+            mediatorDomainState,
+          )
+      )
+      staticDomainParameters = node.parameters
+        .toStaticDomainParameters(
+          CommunityCryptoConfig(provider = CommunityCryptoProvider.Jce),
+          ProtocolVersion.v32,
+        )
+        .valueOr(err =>
+          throw new IllegalArgumentException(s"Invalid domain parameters config: $err")
+        )
+      _ <- context.retryProvider.ensureThatB(
+        RetryFor.Automation,
+        "sequencer_initialization",
+        "Sequencer is initialized",
+        node.sequencerAdminConnection.getStatus.map(_.successOption.fold(false)(_.active)),
+        node.sequencerAdminConnection
+          .initializeFromBeginning(
+            StoredTopologyTransactions(
+              bootstrapTransactions
+            ),
+            staticDomainParameters,
+          )
+          .map(_ => ()),
+        logger,
+      )
+      _ <- context.retryProvider.ensureThatB(
+        RetryFor.Automation,
+        "mediator_initialization",
+        "Mediator is initialized",
+        node.mediatorAdminConnection.getStatus.map(_.successOption.fold(false)(_.active)),
+        node.mediatorAdminConnection
+          .initialize(
+            task.synchronizerId,
+            LocalSynchronizerNode.toSequencerConnection(node.sequencerPublicApi),
+          )
+          .map(_ => ()),
+        logger,
+      )
+    } yield TaskSuccess(s"Initialized synchronizer ${task.synchronizerId}")
+  }
+
+  protected def isStaleTask(task: Task)(implicit tc: TraceContext): Future[Boolean] = {
+    val node = synchronizerNodes(task.synchronizerId.identifier.unwrap)
+    for {
+      sequencerInitialized <- node.sequencerAdminConnection.getStatus.map(
+        _.successOption.fold(false)(_.active)
+      )
+      mediatorInitialized <- node.mediatorAdminConnection.getStatus.map(
+        _.successOption.fold(false)(_.active)
+      )
+    } yield sequencerInitialized && mediatorInitialized
+  }
+
+  // Takes a list of (ordered) signed topology transactions and turns them into
+  // StoredTopologyTransactions ensuring that only the latest serial has validUntil = None
+  private def toStoredTopologyBootstrapTransactions(
+      ts: Seq[GenericSignedTopologyTransaction]
+  ): Seq[GenericStoredTopologyTransaction] =
+    ts.foldRight(
+      (Set.empty[TopologyMapping.MappingHash], Seq.empty[GenericStoredTopologyTransaction])
+    ) { case (tx, (newerMappings, acc)) =>
+      (
+        newerMappings + tx.transaction.mapping.uniqueKey,
+        StoredTopologyTransaction(
+          SequencedTime(CantonTimestamp.MinValue.immediateSuccessor),
+          EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor),
+          Option.when(newerMappings.contains(tx.transaction.mapping.uniqueKey))(
+            EffectiveTime(CantonTimestamp.MinValue.immediateSuccessor)
+          ),
+          tx.copy(isProposal = false),
+        ) +: acc,
+      )
+    }._2
+}
+
+object InitializeSynchronizerTrigger {
+  final case class Task(
+      synchronizerId: DomainId,
+      synchronizerIdentities: Seq[SynchronizerIdentities],
+      bootstrappingTransactions: NonEmpty[Seq[SynchronizerBootstrappingTransactions]],
+  ) extends PrettyPrinting {
+    override def pretty: Pretty[this.type] = prettyOfClass(
+      param("synchronizerId", _.synchronizerId),
+      param("sequencerIds", _.sequencerIds),
+      param("mediatorIds", _.mediatorIds),
+    )
+
+    lazy val sequencerIds = synchronizerIdentities.map(_.sequencerId)
+    lazy val mediatorIds = synchronizerIdentities.map(_.mediatorId)
+  }
+}
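The `foldRight` in `toStoredTopologyBootstrapTransactions` above walks the transaction list newest-first so that only the last transaction per mapping key keeps `validUntil = None`, while every superseded one is closed off. The same bookkeeping on toy types — `Tx`/`StoredTx` are stand-ins, and `"epoch-start"` stands in for the minimal effective time:

    final case class Tx(mappingKey: String, serial: Int)
    final case class StoredTx(tx: Tx, validUntil: Option[String])

    def toStored(ts: Seq[Tx]): Seq[StoredTx] =
      ts.foldRight((Set.empty[String], List.empty[StoredTx])) { case (tx, (newer, acc)) =>
        // If a later transaction already used this mapping key, close this one off.
        val closed = Option.when(newer.contains(tx.mappingKey))("epoch-start")
        (newer + tx.mappingKey, StoredTx(tx, closed) :: acc)
      }._2

    // toStored(Seq(Tx("seq", 1), Tx("seq", 2), Tx("med", 1))) ==
    //   List(StoredTx(Tx("seq",1),Some("epoch-start")), StoredTx(Tx("seq",2),None), StoredTx(Tx("med",1),None))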
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SignSynchronizerBootstrappingStateTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SignSynchronizerBootstrappingStateTrigger.scala
new file mode 100644
index 00000000..41c8ed41
--- /dev/null
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SignSynchronizerBootstrappingStateTrigger.scala
@@ -0,0 +1,263 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package org.lfdecentralizedtrust.splice.sv.automation.singlesv
+
+import cats.syntax.either.*
+import cats.syntax.traverse.*
+import cats.syntax.traverseFilter.*
+import com.digitalasset.canton.config.RequireTypes.{NonNegativeInt, PositiveInt}
+import com.digitalasset.canton.logging.pretty.{Pretty, PrettyPrinting}
+import com.digitalasset.canton.tracing.TraceContext
+import com.digitalasset.canton.topology.{DomainId, ForceFlag, UniqueIdentifier}
+import com.digitalasset.canton.topology.store.{TimeQuery, TopologyStoreId}
+import com.digitalasset.canton.topology.transaction.{
+  DomainParametersState,
+  MediatorDomainState,
+  SequencerDomainState,
+}
+import io.grpc.Status
+import io.opentelemetry.api.trace.Tracer
+import org.apache.pekko.stream.Materializer
+import org.lfdecentralizedtrust.splice.automation.{
+  PollingParallelTaskExecutionTrigger,
+  TaskOutcome,
+  TaskSuccess,
+  TriggerContext,
+}
+import org.lfdecentralizedtrust.splice.config.{Thresholds, UpgradesConfig}
+import org.lfdecentralizedtrust.splice.environment.{ParticipantAdminConnection, RetryFor}
+import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection.TopologyTransactionType
+import org.lfdecentralizedtrust.splice.http.HttpClient
+import org.lfdecentralizedtrust.splice.scan.admin.api.client.commands.HttpScanSoftDomainMigrationPocAppClient.SynchronizerIdentities
+import org.lfdecentralizedtrust.splice.sv.{ExtraSynchronizerNode, LocalSynchronizerNode}
+import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore
+import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder
+import scala.concurrent.{ExecutionContextExecutor, Future}
+
+import SignSynchronizerBootstrappingState.Task
+
+class SignSynchronizerBootstrappingStateTrigger(
+    override val dsoStore: SvDsoStore,
+    participantAdminConnection: ParticipantAdminConnection,
+    override protected val context: TriggerContext,
+    existingSynchronizer: LocalSynchronizerNode,
+    synchronizerNodes: Map[String, ExtraSynchronizerNode],
+    override val upgradesConfig: UpgradesConfig,
+)(implicit
+    ec: ExecutionContextExecutor,
+    httpClient: HttpClient,
+    mat: Materializer,
+    templateJsonDecoder: TemplateJsonDecoder,
+    tracer: Tracer,
+) extends PollingParallelTaskExecutionTrigger[Task]
+    with SoftMigrationTrigger {
+
+  protected def retrieveTasks()(implicit tc: TraceContext): Future[Seq[Task]] =
+    for {
+      scanUrls <- getScanUrls()
+      // TODO(#17032): Handle failures properly
+      // TODO(#17032): consider whether we only want to initialize the ones in requiredSynchronizers in AmuletConfig
+      // TODO(#17032): Exclude synchronizers already bootstrapped
+      tasks <- synchronizerNodes.keys.toList.traverseFilter { prefix =>
+        scanUrls
+          .traverse { url => withScanConnection(url)(_.getSynchronizerIdentities(prefix)) }
+          .map(identities =>
+            Some(
+              Task(
+                DomainId(
+                  UniqueIdentifier.tryCreate(
+                    prefix,
+                    dsoStore.key.dsoParty.uid.namespace,
+                  )
+                ),
+                identities,
+              )
+            )
+          )
+          .recover { err =>
+            // TODO(#17032) Include the url of the failed SV
+            logger.info(
+              s"Failed to query synchronizer identities for $prefix, not all SVs are ready yet: $err"
+            )
+            None
+          }
+      }
+    } yield tasks
+
+  protected def completeTask(task: Task)(implicit tc: TraceContext): Future[TaskOutcome] = {
+    for {
+      decentralizedSynchronizerId <- dsoStore.getAmuletRulesDomain()(tc)
+      // for now we just copy the parameters from the existing domain.
+      parameters <- existingSynchronizer.sequencerAdminConnection.getDomainParametersState(
+        decentralizedSynchronizerId
+      )
+      domainParameters = DomainParametersState(
+        task.synchronizerId,
+        parameters.mapping.parameters,
+      )
+      sequencerDomainState = SequencerDomainState
+        .create(
+          task.synchronizerId,
+          Thresholds.sequencerConnectionsSizeThreshold(task.sequencerIds.size),
+          task.sequencerIds,
+          Seq.empty,
+        )
+        .valueOr(err =>
+          throw Status.INTERNAL
+            .withDescription(s"Failed to construct SequencerDomainState: $err")
+            .asRuntimeException
+        )
+      mediatorDomainState = MediatorDomainState
+        .create(
+          task.synchronizerId,
+          NonNegativeInt.zero,
+          Thresholds.mediatorDomainStateThreshold(task.mediatorIds.size),
+          task.mediatorIds,
+          Seq.empty,
+        )
+        .valueOr(err =>
+          throw Status.INTERNAL
+            .withDescription(s"Failed to construct MediatorDomainState: $err")
+            .asRuntimeException
+        )
+      participantId <- participantAdminConnection.getParticipantId()
+      decentralizedNamespaceTxs <- getDecentralizedNamespaceDefinitionTransactions(
+        participantAdminConnection
+      )
+      _ <- participantAdminConnection.addTopologyTransactions(
+        TopologyStoreId.AuthorizedStore,
+        decentralizedNamespaceTxs,
+        ForceFlag.AlienMember,
+      )
+      signedBy = participantId.uid.namespace.fingerprint
+      _ <- context.retryProvider.ensureThatB(
+        RetryFor.ClientCalls,
+        "domain_parameters",
+        "domain parameters are signed",
+        for {
+          proposalsExist <- participantAdminConnection
+            .listDomainParametersState(
+              TopologyStoreId.AuthorizedStore,
+              task.synchronizerId,
+              TopologyTransactionType.AllProposals,
+              TimeQuery.HeadState,
+            )
+            .map(_.nonEmpty)
+          authorizedExist <-
+            participantAdminConnection
+              .listDomainParametersState(
+                TopologyStoreId.AuthorizedStore,
+                task.synchronizerId,
+                TopologyTransactionType.AuthorizedState,
+                TimeQuery.HeadState,
+              )
+              .map(_.nonEmpty)
+        } yield proposalsExist || authorizedExist,
+        participantAdminConnection
+          .proposeMapping(
+            TopologyStoreId.AuthorizedStore,
+            domainParameters,
+            serial = PositiveInt.one,
+            isProposal = true,
+          )
+          .map(_ => ()),
+        logger,
+      )
+      _ <- participantAdminConnection.addTopologyTransactions(
+        TopologyStoreId.AuthorizedStore,
+        task.synchronizerIdentities.flatMap(_.sequencerIdentityTransactions),
+        ForceFlag.AlienMember,
+      )
+      _ <- context.retryProvider.ensureThatB(
+        RetryFor.ClientCalls,
+        "sequencer_domain_state",
+        "sequencer domain state is signed",
+        for {
+          proposalsExist <- participantAdminConnection
+            .listSequencerDomainState(
+              TopologyStoreId.AuthorizedStore,
+              task.synchronizerId,
+              TimeQuery.HeadState,
+              true,
+            )
+            .map(_.nonEmpty)
+          authorizedExist <-
+            participantAdminConnection
+              .listSequencerDomainState(
+                TopologyStoreId.AuthorizedStore,
+                task.synchronizerId,
+                TimeQuery.HeadState,
+                false,
+              )
+              .map(_.nonEmpty)
+        } yield proposalsExist || authorizedExist,
+        participantAdminConnection
+          .proposeMapping(
+            TopologyStoreId.AuthorizedStore,
+            sequencerDomainState,
+            serial = PositiveInt.one,
+            isProposal = true,
+          )
+          .map(_ => ()),
+        logger,
+      )
+      // add mediator keys, note that in 3.0 not adding these does not fail but in 3.1 it will
+      _ <- participantAdminConnection.addTopologyTransactions(
+        TopologyStoreId.AuthorizedStore,
+        task.synchronizerIdentities.flatMap(_.mediatorIdentityTransactions),
+        ForceFlag.AlienMember,
+      )
+      _ <- context.retryProvider.ensureThatB(
+        RetryFor.ClientCalls,
+        "mediator_domain_state",
+        "mediator domain state is signed",
+        for {
+          proposalsExist <- participantAdminConnection
+            .listMediatorDomainState(
+              TopologyStoreId.AuthorizedStore,
+              task.synchronizerId,
+              true,
+            )
+            .map(_.nonEmpty)
+          authorizedExist <-
+            participantAdminConnection
+              .listMediatorDomainState(
+                TopologyStoreId.AuthorizedStore,
+                task.synchronizerId,
+                false,
+              )
+              .map(_.nonEmpty)
+        } yield proposalsExist || authorizedExist,
+        participantAdminConnection
+          .proposeMapping(
+            TopologyStoreId.AuthorizedStore,
+            mediatorDomainState,
+            serial = PositiveInt.one,
+            isProposal = true,
+          )
+          .map(_ => ()),
+        logger,
+      )
+    } yield TaskSuccess(s"Signed topology state for ${task.synchronizerId}")
+  }
+
+  // TODO(#17032) Add a better staleness check
+  protected def isStaleTask(task: Task)(implicit tc: TraceContext): Future[Boolean] =
+    Future.successful(false)
+}
+
+object SignSynchronizerBootstrappingState {
+  final case class Task(
+      synchronizerId: DomainId,
+      synchronizerIdentities: Seq[SynchronizerIdentities],
+  ) extends PrettyPrinting {
+    override def pretty: Pretty[this.type] = prettyOfClass(
+      param("synchronizerId", _.synchronizerId),
+      param("sequencerIds", _.sequencerIds),
+      param("mediatorIds", _.mediatorIds),
+    )
+
+    lazy val sequencerIds = synchronizerIdentities.map(_.sequencerId)
+    lazy val mediatorIds = synchronizerIdentities.map(_.mediatorId)
+  }
+}
diff --git a/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SoftMigrationTrigger.scala b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SoftMigrationTrigger.scala
new file mode 100644
index 00000000..3a3aa7a0
--- /dev/null
+++ b/apps/sv/src/main/scala/org/lfdecentralizedtrust/splice/sv/automation/singlesv/SoftMigrationTrigger.scala
@@ -0,0 +1,95 @@
+// Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+// SPDX-License-Identifier: Apache-2.0
+
+package org.lfdecentralizedtrust.splice.sv.automation.singlesv
+
+import cats.syntax.traverse.*
+import com.digitalasset.canton.topology.store.{TimeQuery, TopologyStoreId}
+import com.digitalasset.canton.topology.transaction.TopologyMapping
+import com.digitalasset.canton.topology.transaction.SignedTopologyTransaction.GenericSignedTopologyTransaction
+import com.digitalasset.canton.tracing.TraceContext
+import org.apache.pekko.stream.Materializer
+import org.lfdecentralizedtrust.splice.automation.Trigger
+import org.lfdecentralizedtrust.splice.config.{NetworkAppClientConfig, UpgradesConfig}
+import org.lfdecentralizedtrust.splice.environment.TopologyAdminConnection
+import org.lfdecentralizedtrust.splice.http.HttpClient
+import org.lfdecentralizedtrust.splice.scan.admin.api.client.SingleScanConnection
+import org.lfdecentralizedtrust.splice.scan.config.ScanAppClientConfig
+import org.lfdecentralizedtrust.splice.sv.store.SvDsoStore
+import org.lfdecentralizedtrust.splice.util.TemplateJsonDecoder
+import scala.concurrent.{ExecutionContext, ExecutionContextExecutor, Future}
+import scala.jdk.CollectionConverters.*
+import scala.jdk.OptionConverters.*
+
+trait SoftMigrationTrigger extends Trigger {
+  def dsoStore: SvDsoStore
+  def upgradesConfig: UpgradesConfig
+
+  def getScanUrls()(implicit ec: ExecutionContext, tc: TraceContext): Future[Seq[String]] = {
+    for {
+      // TODO(#13301) We should use the internal URL for the SV’s own scan to avoid a loopback requirement
+      dsoRulesWithSvNodeStates <- dsoStore.getDsoRulesWithSvNodeStates()
+    } yield dsoRulesWithSvNodeStates.svNodeStates.values
+      .flatMap(
+        _.payload.state.synchronizerNodes.asScala.values
+          .flatMap(_.scan.toScala.toList.map(_.publicUrl))
+      )
+      .toList
+      // sorted to
make it deterministic + .sorted + } + + def withScanConnection[T]( + url: String + )(f: SingleScanConnection => Future[T])(implicit + ec: ExecutionContextExecutor, + httpClient: HttpClient, + mat: Materializer, + tc: TraceContext, + templateJsonDecoder: TemplateJsonDecoder, + ): Future[T] = + SingleScanConnection.withSingleScanConnection( + ScanAppClientConfig( + NetworkAppClientConfig( + url + ) + ), + upgradesConfig, + context.clock, + context.retryProvider, + loggerFactory, + )(f) + + def getDecentralizedNamespaceDefinitionTransactions(connection: TopologyAdminConnection)(implicit + tc: TraceContext + ): Future[Seq[GenericSignedTopologyTransaction]] = for { + decentralizedSynchronizerId <- dsoStore.getAmuletRulesDomain()(tc) + namespaceDefinitions <- connection.listDecentralizedNamespaceDefinition( + decentralizedSynchronizerId, + decentralizedSynchronizerId.uid.namespace, + timeQuery = TimeQuery.Range(None, None), + ) + identityTransactions <- namespaceDefinitions + .flatMap(_.mapping.owners) + .toSet + .toList + .traverse { namespace => + connection.listAllTransactions( + TopologyStoreId.DomainStore(decentralizedSynchronizerId), + TimeQuery.Range(None, None), + includeMappings = Set( + TopologyMapping.Code.OwnerToKeyMapping, + TopologyMapping.Code.NamespaceDelegation, + ), + filterNamespace = Some(namespace), + ) + } + .map(_.flatten) + decentralizedNamespaceDefinition <- connection.listAllTransactions( + TopologyStoreId.DomainStore(decentralizedSynchronizerId), + TimeQuery.Range(None, None), + includeMappings = Set(TopologyMapping.Code.DecentralizedNamespaceDefinition), + ) + } yield (identityTransactions ++ decentralizedNamespaceDefinition).map(_.transaction) + +} diff --git a/apps/validator/src/main/openapi/validator-internal.yaml b/apps/validator/src/main/openapi/validator-internal.yaml index 00760a63..a1f748c3 100644 --- a/apps/validator/src/main/openapi/validator-internal.yaml +++ b/apps/validator/src/main/openapi/validator-internal.yaml @@ -646,39 +646,6 @@ components: created_at: type: string - # Old format for backwards compat - # TODO(#13131) Remove this. 
- LegacyDomainMigrationDump: - type: object - required: - - participant - - acsSnapshot - - acsTimestamp - - dars - - migrationId - - domainId - - createdAt - properties: - participant: - "$ref": "../../../../common/src/main/openapi/common-internal.yaml#/components/schemas/NodeIdentitiesDump" - acsSnapshot: - description: | - base64 encoded string of acs snapshot for the requested party id - type: string - acsTimestamp: - type: string - dars: - type: array - items: - $ref: "#/components/schemas/Dar" - migrationId: - type: integer - format: int64 - domainId: - type: string - createdAt: - type: string - GetValidatorDomainDataSnapshotResponse: type: object required: diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala index 9a1a1711..fb370177 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/ValidatorApp.scala @@ -146,7 +146,7 @@ class ValidatorApp( "Ensuring participant is initialized" ) val cantonIdentifierConfig = - config.cantonIdentifierConfig.getOrElse(ValidatorCantonIdentifierConfig.default(config)) + ValidatorCantonIdentifierConfig.resolvedNodeIdentifierConfig(config) ParticipantInitializer.ensureParticipantInitializedWithExpectedId( cantonIdentifierConfig.participant, participantAdminConnection, diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/config/ValidatorAppConfig.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/config/ValidatorAppConfig.scala index 044db0f2..a748d91e 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/config/ValidatorAppConfig.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/config/ValidatorAppConfig.scala @@ -208,10 +208,16 @@ final case class ValidatorCantonIdentifierConfig( participant: String ) object ValidatorCantonIdentifierConfig { - def default(config: ValidatorAppBackendConfig): ValidatorCantonIdentifierConfig = { + private def default(config: ValidatorAppBackendConfig): ValidatorCantonIdentifierConfig = { val identifier = config.validatorPartyHint.getOrElse("unnamedValidator") ValidatorCantonIdentifierConfig( participant = identifier ) } + + // The config reader/writer derivation fails if we make this a method on the config class so we keep it here on the companion + def resolvedNodeIdentifierConfig( + config: ValidatorAppBackendConfig + ): ValidatorCantonIdentifierConfig = + config.cantonIdentifierConfig.getOrElse(ValidatorCantonIdentifierConfig.default(config)) } diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala index d226e22f..eff319f7 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/domain/DomainConnector.scala @@ -17,7 +17,7 @@ import com.daml.nonempty.NonEmpty import com.digitalasset.canton.{DomainAlias, SequencerAlias} import com.digitalasset.canton.config.DomainTimeTrackerConfig import com.digitalasset.canton.data.CantonTimestamp -import com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging, TracedLogger} +import 
com.digitalasset.canton.logging.{NamedLoggerFactory, NamedLogging}
 import com.digitalasset.canton.participant.domain.DomainConnectionConfig
 import com.digitalasset.canton.sequencing.{
   GrpcSequencerConnection,
@@ -83,7 +83,7 @@ class DomainConnector(
   ): Future[Map[DomainAlias, SequencerConnections]] = {
     config.domains.global.url match {
       case None =>
-        sequencerConnectionsFromScan()
+        waitForSequencerConnectionsFromScan()
       case Some(url) =>
         if (config.supportsSoftDomainMigrationPoc) {
           // TODO (#13301) Make this work by making the config more flexible.
@@ -125,42 +125,45 @@
     )
   }
 
-  private def sequencerConnectionsFromScan(
-  )(implicit tc: TraceContext): Future[Map[DomainAlias, SequencerConnections]] = for {
-    _ <- waitForSequencerConnectionsFromScan(logger, retryProvider)
-    sequencerConnections <- getSequencerConnectionsFromScan(clock.now)
-  } yield sequencerConnections.view.mapValues { connections =>
-    NonEmpty.from(connections) match {
-      case None =>
-        sys.error("sequencer connections from scan is not expected to be empty.")
-      case Some(nonEmptyConnections) =>
-        SequencerConnections.tryMany(
-          nonEmptyConnections.forgetNE,
-          Thresholds.sequencerConnectionsSizeThreshold(nonEmptyConnections.size),
-          submissionRequestAmplification = SubmissionRequestAmplification(
-            Thresholds.sequencerSubmissionRequestAmplification(nonEmptyConnections.size),
-            config.sequencerRequestAmplificationPatience,
-          ),
-        )
-    }
-  }.toMap
-
   private def waitForSequencerConnectionsFromScan(
-      logger: TracedLogger,
-      retryProvider: RetryProvider,
-  )(implicit tc: TraceContext) = {
-    retryProvider.waitUntil(
-      RetryFor.WaitingOnInitDependency,
+  )(implicit tc: TraceContext): Future[Map[DomainAlias, SequencerConnections]] = {
+    retryProvider.getValueWithRetries(
+      // Short retries, since a failure here is usually just a misconfiguration error.
+      // The only legitimate transient failure is during a domain migration, and even then
+      // it is unlikely, outside of tests, for a validator to come up fast enough that
+      // scan has not yet been updated.
+ RetryFor.ClientCalls, "scan_sequencer_connections", - "valid sequencer connections from scan is non empty", + "non-empty sequencer connections from scan", getSequencerConnectionsFromScan(clock.now) .map { connections => - if (connections.isEmpty) + if (connections.isEmpty) { throw Status.NOT_FOUND .withDescription( s"sequencer connections for migration id $migrationId is empty, validate with your SV sponsor that your migration id is correct" ) .asRuntimeException() + } else { + connections.view.mapValues { + NonEmpty.from(_) match { + case None => + throw Status.NOT_FOUND + .withDescription( + s"sequencer connections for migration id $migrationId is empty, validate with your SV sponsor that your migration id is correct" + ) + .asRuntimeException() + case Some(nonEmptyConnections) => + SequencerConnections.tryMany( + nonEmptyConnections.forgetNE, + Thresholds.sequencerConnectionsSizeThreshold(nonEmptyConnections.size), + submissionRequestAmplification = SubmissionRequestAmplification( + Thresholds.sequencerSubmissionRequestAmplification(nonEmptyConnections.size), + config.sequencerRequestAmplificationPatience, + ), + ) + } + }.toMap + } }, logger, ) diff --git a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDump.scala b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDump.scala index 09dfb87c..954bad7b 100644 --- a/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDump.scala +++ b/apps/validator/src/main/scala/org/lfdecentralizedtrust/splice/validator/migration/DomainMigrationDump.scala @@ -54,26 +54,12 @@ final case class DomainMigrationDump( object DomainMigrationDump { implicit val domainMigrationCodec: Codec[DomainMigrationDump] = Codec.from( - // We try legacy first so that if both fail the return error comes from the non-legacy decoder. 
- Decoder[http.LegacyDomainMigrationDump] - .map(fromLegacy(_)) - .handleErrorWith(_ => Decoder[http.DomainMigrationDump]) emap fromHttp, + Decoder[http.DomainMigrationDump] emap fromHttp, Encoder[http.DomainMigrationDump] contramap (_.toHttp), ) private val base64Decoder = Base64.getDecoder() - private def fromLegacy(legacy: http.LegacyDomainMigrationDump): http.DomainMigrationDump = - http.DomainMigrationDump( - legacy.participant, - legacy.acsSnapshot, - legacy.acsTimestamp, - legacy.dars, - legacy.migrationId, - legacy.domainId, - legacy.createdAt, - ) - def fromHttp(response: http.DomainMigrationDump) = for { participant <- NodeIdentitiesDump .fromHttp(ParticipantId.tryFromProtoPrimitive, response.participant) diff --git a/cluster/compose/sv/.env b/cluster/compose/sv/.env index db3a48df..75d20402 100644 --- a/cluster/compose/sv/.env +++ b/cluster/compose/sv/.env @@ -1,4 +1,4 @@ -IMAGE_REPO=digitalasset-canton-network-docker.jfrog.io/digitalasset/ +IMAGE_REPO=ghcr.io/digital-asset/decentralized-canton-sync/docker/ # Versions SPLICE_POSTGRES_VERSION=14 diff --git a/cluster/compose/sv/compose.yaml b/cluster/compose/sv/compose.yaml index 3e283002..1b07b4c6 100644 --- a/cluster/compose/sv/compose.yaml +++ b/cluster/compose/sv/compose.yaml @@ -71,7 +71,7 @@ services: - CANTON_PARTICIPANT_ADMIN_USER_NAME=administrator - AUTH_TARGET_AUDIENCE=${LEDGER_API_AUTH_AUDIENCE} - ADDITIONAL_CONFIG_DISABLE_AUTH=canton.participants.participant.ledger-api.auth-services=[] - - ADDITIONAL_CONFIG_ALLOW_INSECURE=canton.participants.participant.http-ledger-api-experimental.allow-insecure-tokens=true + - ADDITIONAL_CONFIG_ALLOW_INSECURE=canton.participants.participant.http-ledger-api.allow-insecure-tokens=true depends_on: postgres-splice-sv: condition: service_healthy diff --git a/cluster/compose/validator/.env b/cluster/compose/validator/.env index cc8888f0..304eac4b 100644 --- a/cluster/compose/validator/.env +++ b/cluster/compose/validator/.env @@ -1,4 +1,4 @@ -IMAGE_REPO=digitalasset-canton-network-docker.jfrog.io/digitalasset/ +IMAGE_REPO=ghcr.io/digital-asset/decentralized-canton-sync/docker/ # Versions SPLICE_POSTGRES_VERSION=14 diff --git a/cluster/compose/validator/compose-disable-auth.yaml b/cluster/compose/validator/compose-disable-auth.yaml index 1b28758a..22afb2ff 100644 --- a/cluster/compose/validator/compose-disable-auth.yaml +++ b/cluster/compose/validator/compose-disable-auth.yaml @@ -3,7 +3,7 @@ services: environment: - CANTON_PARTICIPANT_ADMIN_USER_NAME=ledger-api-user - ADDITIONAL_CONFIG_DISABLE_AUTH=canton.participants.participant.ledger-api.auth-services=[] - - ADDITIONAL_CONFIG_ALLOW_INSECURE=canton.participants.participant.http-ledger-api-experimental.allow-insecure-tokens=true + - ADDITIONAL_CONFIG_ALLOW_INSECURE=canton.participants.participant.http-ledger-api.allow-insecure-tokens=true validator: environment: diff --git a/cluster/compose/validator/start.sh b/cluster/compose/validator/start.sh index 40f8e6e6..f6c9ad26 100755 --- a/cluster/compose/validator/start.sh +++ b/cluster/compose/validator/start.sh @@ -21,9 +21,9 @@ function _info(){ } function usage() { - echo "Usage: $0 -s -o -p [-a] [-b] [-c ] [-C ] [-q ] [-n ] [-m ] [-M] [-i ] [-P ] [-w] [-l]" + echo "Usage: $0 -s -o -p -m [-a] [-b] [-c ] [-C ] [-q ] [-n ] [-M] [-i ] [-P ] [-w] [-l]" echo " -s : The full URL of the sponsor SV" - echo " -o : The onboarding secret to use. If not provided, it will be fetched from the sponsor SV (possible on DevNet only)" + echo " -o : The onboarding secret to use. 
May be empty (\"\") if you are already onboarded." echo " -p : The party hint to use for the validator operator, by default also your participant identifier." echo " -P : The participant identifier." echo " -a: Use this flag to enable authentication" @@ -52,10 +52,10 @@ trust_single=0 SPONSOR_SV_ADDRESS="" SCAN_ADDRESS="" host_scan_address="" -ONBOARDING_SECRET="" +ONBOARDING_SECRET="undefined" SEQUENCER_ADDRESS="" network_name="" -migration_id=0 +migration_id="" migrating=0 party_hint="" participant_id="" @@ -142,8 +142,8 @@ if [ -z "${SPONSOR_SV_ADDRESS}" ]; then exit 1 fi -if [ -z "${ONBOARDING_SECRET}" ]; then - _error_msg "Please provide the onboarding secret" +if [ "${ONBOARDING_SECRET}" == "undefined" ]; then + _error_msg "Please provide the onboarding secret. If you are already onboarded, you may leave the secret value itself empty, i.e. specify \`-o \"\"\`" usage exit 1 fi @@ -165,6 +165,12 @@ if [ $trust_single -eq 1 ] && [ -z "${SEQUENCER_ADDRESS}" ]; then exit 1 fi +if [ -z "${migration_id}" ]; then + _error_msg "Please provide a migration id, you can find the current migration id at https://sync.global/sv-network/ (make sure you select the right network)" + usage + exit 1 +fi + if [[ ! "${migration_id}" =~ ^[0-9]+$ ]]; then _error_msg "Migration ID must be a non-negative integer" usage diff --git a/cluster/helm/.gitignore b/cluster/helm/.gitignore index 9cfa0a0b..67afb041 100644 --- a/cluster/helm/.gitignore +++ b/cluster/helm/.gitignore @@ -2,4 +2,6 @@ Chart.yaml values.yaml /.version-tag +/.app-charts +/.image-digests LICENSE diff --git a/cluster/helm/cn-docs/values-template.yaml b/cluster/helm/cn-docs/values-template.yaml index d1310cca..8cdf942c 100644 --- a/cluster/helm/cn-docs/values-template.yaml +++ b/cluster/helm/cn-docs/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" enableGcsProxy: false diff --git a/cluster/helm/local.mk b/cluster/helm/local.mk index 12cb41ef..a78eb2eb 100644 --- a/cluster/helm/local.mk +++ b/cluster/helm/local.mk @@ -22,6 +22,7 @@ all_charts := $(app_charts) splice-util-lib HELM_VERSION_TAG := cluster/helm/.version-tag IMAGE_DIGESTS := cluster/helm/.image-digests +APP_CHARTS_FILE := cluster/helm/.app-charts # Makefile for each file in cluster/helm/target run `helm push $file` .PHONY: cluster/helm/push @@ -38,6 +39,14 @@ cluster/helm/write-version: cluster/helm/write-digests: get-docker-image-digests.sh > $(IMAGE_DIGESTS) +.PHONY: cluster/helm/write-app-charts +cluster/helm/write-app-charts: + overwrite-if-changed '$(shell echo $(app_charts) | tr ' ' '\n')' $(APP_CHARTS_FILE) + +.PHONY: cluster/helm/copy_release_to_ghcr +cluster/helm/copy_release_to_ghcr: cluster/helm/write-app-charts cluster/helm/write-version + ./build-tools/copy_release_helm_charts_to_ghcr.sh -v $(shell cat cluster/helm/.version-tag) -f cluster/helm/.app-charts + .PHONY: cluster/helm/build cluster/helm/build: $(foreach chart,$(all_charts),cluster/helm/$(chart)/helm-build) diff --git a/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml b/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml index 83881a3e..84afc8ec 100644 --- a/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml +++ b/cluster/helm/splice-cluster-ingress-runbook/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" withSvIngress: true defaultJvmOptions: -Xms384M -Xmx384M -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data ingress: diff --git a/cluster/helm/splice-cluster-loopback-gateway/values-template.yaml b/cluster/helm/splice-cluster-loopback-gateway/values-template.yaml index 65f93576..42e99454 100644 --- a/cluster/helm/splice-cluster-loopback-gateway/values-template.yaml +++ b/cluster/helm/splice-cluster-loopback-gateway/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" cluster: ingress: istio-ingress.cluster-ingress defaultJvmOptions: -Xms384M -Xmx384M -Dscala.concurrent.context.minThreads=8 diff --git a/cluster/helm/splice-domain/values-template.yaml b/cluster/helm/splice-domain/values-template.yaml index f37502f8..748623d8 100644 --- a/cluster/helm/splice-domain/values-template.yaml +++ b/cluster/helm/splice-domain/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" # -- Annotations for the domain pods pod: diff --git a/cluster/helm/splice-global-domain/values-template.yaml b/cluster/helm/splice-global-domain/values-template.yaml index f6a983e2..92380c75 100644 --- a/cluster/helm/splice-global-domain/values-template.yaml +++ b/cluster/helm/splice-global-domain/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" enableHealthProbes: true diff --git a/cluster/helm/splice-istio-gateway/values-template.yaml b/cluster/helm/splice-istio-gateway/values-template.yaml index d4fef094..918e8fef 100644 --- a/cluster/helm/splice-istio-gateway/values-template.yaml +++ b/cluster/helm/splice-istio-gateway/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" defaultJvmOptions: -Xms384M -Xmx384M -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data cometbftPorts: nodes: 10 diff --git a/cluster/helm/splice-load-tester/values-template.yaml b/cluster/helm/splice-load-tester/values-template.yaml index 2f042b05..30c17295 100644 --- a/cluster/helm/splice-load-tester/values-template.yaml +++ b/cluster/helm/splice-load-tester/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" config: "{}" prometheusRw: "" diff --git a/cluster/helm/splice-participant/values-template.yaml b/cluster/helm/splice-participant/values-template.yaml index 7bbdf100..14f01103 100644 --- a/cluster/helm/splice-participant/values-template.yaml +++ b/cluster/helm/splice-participant/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" enableHealthProbes: true diff --git a/cluster/helm/splice-postgres/values-template.yaml b/cluster/helm/splice-postgres/values-template.yaml index 123f34f3..d465ed47 100644 --- a/cluster/helm/splice-postgres/values-template.yaml +++ b/cluster/helm/splice-postgres/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" resources: limits: cpu: "4" diff --git a/cluster/helm/splice-scan/values-template.yaml b/cluster/helm/splice-scan/values-template.yaml index 52b7df9d..e0b4f761 100644 --- a/cluster/helm/splice-scan/values-template.yaml +++ b/cluster/helm/splice-scan/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" # -- Annotations for the scan pod pod: diff --git a/cluster/helm/splice-splitwell-app/values-template.yaml b/cluster/helm/splice-splitwell-app/values-template.yaml index 493b6d0e..36768a11 100644 --- a/cluster/helm/splice-splitwell-app/values-template.yaml +++ b/cluster/helm/splice-splitwell-app/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" defaultJvmOptions: -XX:+UseG1GC -XX:MaxRAMPercentage=75 -XX:InitialRAMPercentage=75 -Dscala.concurrent.context.minThreads=8 -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/persistent-data fixedTokens: false resources: diff --git a/cluster/helm/splice-splitwell-web-ui/values-template.yaml b/cluster/helm/splice-splitwell-web-ui/values-template.yaml index 2aea0a44..5ca0749d 100644 --- a/cluster/helm/splice-splitwell-web-ui/values-template.yaml +++ b/cluster/helm/splice-splitwell-web-ui/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" resources: limits: cpu: "1" diff --git a/cluster/helm/splice-sv-node/values-template.yaml b/cluster/helm/splice-sv-node/values-template.yaml index 29406d4b..7d71808f 100644 --- a/cluster/helm/splice-sv-node/values-template.yaml +++ b/cluster/helm/splice-sv-node/values-template.yaml @@ -1,7 +1,7 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 -imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset" +imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker" # -- Annotations for the sv-node pods pod: diff --git a/cluster/helm/splice-util-lib/values-template.yaml b/cluster/helm/splice-util-lib/values-template.yaml index c7c4ee1f..909f7037 100644 --- a/cluster/helm/splice-util-lib/values-template.yaml +++ b/cluster/helm/splice-util-lib/values-template.yaml @@ -1,4 +1,4 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0
 
-imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset"
+imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker"
diff --git a/cluster/helm/splice-validator/values-template.yaml b/cluster/helm/splice-validator/values-template.yaml
index 2a8312f0..b5f82efe 100644
--- a/cluster/helm/splice-validator/values-template.yaml
+++ b/cluster/helm/splice-validator/values-template.yaml
@@ -1,7 +1,7 @@
 # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
 # SPDX-License-Identifier: Apache-2.0
 
-imageRepo: "digitalasset-canton-network-docker.jfrog.io/digitalasset"
+imageRepo: "ghcr.io/digital-asset/decentralized-canton-sync/docker"
 
 fixedTokens: false
diff --git a/cluster/images/.gitignore b/cluster/images/.gitignore
new file mode 100644
index 00000000..5a345eca
--- /dev/null
+++ b/cluster/images/.gitignore
@@ -0,0 +1 @@
+/.images
diff --git a/cluster/images/canton-participant/app.conf b/cluster/images/canton-participant/app.conf
index 986e0895..5bc25b3b 100644
--- a/cluster/images/canton-participant/app.conf
+++ b/cluster/images/canton-participant/app.conf
@@ -77,7 +77,7 @@ canton {
       }
     }
 
-    http-ledger-api-experimental.server {
+    http-ledger-api.server {
       port = 7575
       address = 0.0.0.0
       path-prefix = ${?CANTON_PARTICIPANT_JSON_API_SERVER_PATH_PREFIX}
diff --git a/cluster/images/canton/Dockerfile b/cluster/images/canton/Dockerfile
index ce5ab8e9..b709f536 100644
--- a/cluster/images/canton/Dockerfile
+++ b/cluster/images/canton/Dockerfile
@@ -27,6 +27,9 @@ ADD target/canton.tar .
 
 COPY target/monitoring.conf target/parameters.conf target/entrypoint.sh target/bootstrap-entrypoint.sc target/tools.sh target/logback.xml /app/
 
+# Overwrite the LICENSE.txt file from the canton image with the one from Splice
+COPY target/LICENSE.txt .
+
 RUN ln -s bin/canton splice-image-bin
 
 FROM fullstorydev/grpcurl:v1.9.2 AS grpcurl
diff --git a/cluster/images/canton/local.mk b/cluster/images/canton/local.mk
index ca3b7ca4..08acd119 100644
--- a/cluster/images/canton/local.mk
+++ b/cluster/images/canton/local.mk
@@ -8,7 +8,11 @@ target-logback := $(dir)/target/logback.xml
 
 include ${REPO_ROOT}/cluster/images/common/entrypoint-image.mk
 
-$(dir)/$(docker-build): $(dir)/target/entrypoint.sh $(target-canton) $(target-logback)
+$(dir)/$(docker-build): $(dir)/target/entrypoint.sh $(target-canton) $(target-logback) $(dir)/target/LICENSE.txt
+
+# We override LICENSE.txt from canton to avoid divergence in the LICENSE file
+$(dir)/target/LICENSE.txt: LICENSE $(target-canton)
+	cp $< $@
 
 $(target-canton):
 	rm -f $@ ;\
diff --git a/cluster/images/cometbft/Dockerfile b/cluster/images/cometbft/Dockerfile
index 4b71633a..0843e28f 100644
--- a/cluster/images/cometbft/Dockerfile
+++ b/cluster/images/cometbft/Dockerfile
@@ -5,3 +5,5 @@ ARG cometbft_version
 FROM digitalasset-canton-enterprise-docker.jfrog.io/cometbft-canton-network:$cometbft_version
 COPY configure-state-sync.sh /cometbft/
 RUN chmod +x /cometbft/configure-state-sync.sh
+
+COPY target/LICENSE .
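Since this change renames the participant's JSON Ledger API config key from `http-ledger-api-experimental` to `http-ledger-api`, a repo-wide search is a cheap way to confirm no stale references remain (a suggested follow-up check, not part of the change itself):

    # expect no output once all configs and compose files are updated
    grep -rn "http-ledger-api-experimental" .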
diff --git a/cluster/images/cometbft/local.mk b/cluster/images/cometbft/local.mk index d6656d30..d4da038f 100644 --- a/cluster/images/cometbft/local.mk +++ b/cluster/images/cometbft/local.mk @@ -3,5 +3,8 @@ dir := $(call current_dir) -$(dir)/$(docker-build): $(dir)/configure-state-sync.sh +$(dir)/$(docker-build): $(dir)/configure-state-sync.sh $(dir)/target/LICENSE $(dir)/$(docker-build): build_arg := --build-arg cometbft_version=${COMETBFT_RELEASE_VERSION} + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/docs/Dockerfile b/cluster/images/docs/Dockerfile index a272cfa1..39d8b789 100644 --- a/cluster/images/docs/Dockerfile +++ b/cluster/images/docs/Dockerfile @@ -8,6 +8,7 @@ FROM nginx:stable ARG version COPY --from=0 app/splice-node/docs/html /usr/share/nginx/html/ +COPY --from=0 app/LICENSE . COPY script.js /tmpl/script.js.tmpl COPY docker-entrypoint.sh /custom-docker-entrypoint.sh ENTRYPOINT ["/custom-docker-entrypoint.sh"] diff --git a/cluster/images/gcs-proxy/Dockerfile b/cluster/images/gcs-proxy/Dockerfile index 3279e2e2..0bdbfb93 100644 --- a/cluster/images/gcs-proxy/Dockerfile +++ b/cluster/images/gcs-proxy/Dockerfile @@ -14,4 +14,6 @@ RUN apt-get update \ ENV GOOGLE_CLOUD_PROJECT=da-cn-shared +COPY target/LICENSE . + CMD ["/gcsproxy", "-v", "-b", "0.0.0.0:8080" ] diff --git a/cluster/images/gcs-proxy/local.mk b/cluster/images/gcs-proxy/local.mk index 8871b021..ca334a80 100644 --- a/cluster/images/gcs-proxy/local.mk +++ b/cluster/images/gcs-proxy/local.mk @@ -1,3 +1,12 @@ # Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. # SPDX-License-Identifier: Apache-2.0 +dir := $(call current_dir) + +$(dir)/$(docker-build): $(dir)/target/LICENSE $(target-dir) + +$(dir)/target/LICENSE: LICENSE $(target-dir) + cp $< $@ + +$(target-dir): + mkdir -p $(@D) diff --git a/cluster/images/load-tester/Dockerfile b/cluster/images/load-tester/Dockerfile index 3a4b5c3d..1e1f0c86 100644 --- a/cluster/images/load-tester/Dockerfile +++ b/cluster/images/load-tester/Dockerfile @@ -5,6 +5,7 @@ FROM grafana/k6:0.48.0 COPY entrypoint.sh . COPY target/test/* ./ +COPY target/LICENSE . 
ENTRYPOINT [ "/bin/sh" ] CMD [ "/home/k6/entrypoint.sh" ] diff --git a/cluster/images/load-tester/local.mk b/cluster/images/load-tester/local.mk index fe3ae5e3..aa00f3ad 100644 --- a/cluster/images/load-tester/local.mk +++ b/cluster/images/load-tester/local.mk @@ -6,8 +6,11 @@ dir := $(call current_dir) target-load-tester := $(dir)/target/test load-tester := ${REPO_ROOT}/load-tester/dist -$(dir)/$(docker-build): $(target-load-tester) +$(dir)/$(docker-build): $(target-load-tester) $(dir)/target/LICENSE $(target-load-tester): $(load-tester) mkdir -p $(@D) cp -r $< $@ + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/local.mk b/cluster/images/local.mk index 1fce637c..a98851e5 100644 --- a/cluster/images/local.mk +++ b/cluster/images/local.mk @@ -33,12 +33,14 @@ images := \ \ splice-test-postgres \ splice-test-ci \ + splice-test-docker-runner \ splice-test-cometbft \ splice-test-temp-runner-hook \ canton-image := cluster/images/canton splice-image := cluster/images/splice-app splice-ui-image := cluster/images/splice-web-ui +images_file := cluster/images/.images ifdef CI # never use the cache in CI on the master branch @@ -88,7 +90,6 @@ $$(prefix)/get-docker-image-id: $$(prefix)/$(docker-image-tag) .PHONY: $$(prefix)/clean $$(prefix)/clean: -rm -vfr $$(@D)/target - endef # end DEFINE_PHONY_RULES $(foreach image,$(images),$(eval $(call DEFINE_PHONY_RULES,$(image)))) @@ -113,3 +114,18 @@ $(foreach image,$(images),$(eval $(call DEFINE_PHONY_RULES,$(image)))) %/$(docker-push): %/$(docker-image-tag) %/$(docker-build) cd $(@D)/.. && docker-push $$(cat $(abspath $<)) + +%/$(docker-copy-release-to-ghcr): %/$(docker-image-tag) %/$(docker-build) + cd $(@D)/.. && copy_release_to_ghcr $$(cat $(abspath $<)) + +######### +# Global targets +######### + +.PHONY: write-images +write-images: + overwrite-if-changed '$(shell echo $(images) | tr ' ' '\n')' $(images_file) + +.PHONY: cluster/docker/copy_release_to_ghcr +cluster/docker/copy_release_to_ghcr: write-images + ./build-tools/copy_release_images_to_ghcr.sh -v '$(shell get-snapshot-version)' -f $(images_file) diff --git a/cluster/images/pulumi-kubernetes-operator/Dockerfile b/cluster/images/pulumi-kubernetes-operator/Dockerfile index e367ca7c..4556ee97 100644 --- a/cluster/images/pulumi-kubernetes-operator/Dockerfile +++ b/cluster/images/pulumi-kubernetes-operator/Dockerfile @@ -37,3 +37,5 @@ ENV XDG_CACHE_HOME=/tmp/.cache ENV XDG_CONFIG_CACHE=/tmp/.cache ENV GOCACHE=/tmp/.cache/go-build ENV GOPATH=/tmp/.cache/go + +COPY target/LICENSE . 
diff --git a/cluster/images/pulumi-kubernetes-operator/local.mk b/cluster/images/pulumi-kubernetes-operator/local.mk index 37522361..98af2085 100644 --- a/cluster/images/pulumi-kubernetes-operator/local.mk +++ b/cluster/images/pulumi-kubernetes-operator/local.mk @@ -3,4 +3,8 @@ dir := $(call current_dir) +$(dir)/$(docker-build): $(dir)/target/LICENSE $(dir)/$(docker-build): platform_opt := --platform=linux/amd64 --build-arg pulumi_version=${PULUMI_VERSION} + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-debug/Dockerfile b/cluster/images/splice-debug/Dockerfile index 6f1ec948..2ea58dff 100644 --- a/cluster/images/splice-debug/Dockerfile +++ b/cluster/images/splice-debug/Dockerfile @@ -6,3 +6,5 @@ FROM --platform=linux/amd64 ubuntu:latest RUN apt-get update && apt-get install -y postgresql-client curl RUN curl -sSLO https://github.com/fullstorydev/grpcurl/releases/download/v1.9.2/grpcurl_1.9.2_linux_amd64.deb && dpkg -i grpcurl_1.9.2_linux_amd64.deb && rm grpcurl_1.9.2_linux_amd64.deb + +COPY target/LICENSE . diff --git a/cluster/images/splice-debug/local.mk b/cluster/images/splice-debug/local.mk index 324d11d1..52006328 100644 --- a/cluster/images/splice-debug/local.mk +++ b/cluster/images/splice-debug/local.mk @@ -3,3 +3,7 @@ dir := $(call current_dir) +$(dir)/$(docker-build): $(dir)/target/LICENSE + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-test-ci/Dockerfile b/cluster/images/splice-test-ci/Dockerfile index a67222a5..afbeba4a 100644 --- a/cluster/images/splice-test-ci/Dockerfile +++ b/cluster/images/splice-test-ci/Dockerfile @@ -1,6 +1,7 @@ # Note that we don't currently support arm64 runners, so we build this only for amd64 FROM --platform=$BUILDPLATFORM ubuntu:24.04 +# TODO(#15988): consider whether we can move some of the things installed here (and with pip below) into nix RUN apt-get update && \ apt-get install -y sudo git curl xz-utils pigz rsync jq unzip python3-pip && \ rm -rf /var/lib/apt/lists/* @@ -12,7 +13,8 @@ RUN groupadd --gid=1002 ci && \ echo 'Defaults env_keep += "DEBIAN_FRONTEND"' >> /etc/sudoers.d/env_keep && \ sudo -u ci mkdir /github/home/project && \ sudo -u ci mkdir /github/home/bin && \ - sudo -u ci mkdir -p /github/home/.local/bin + sudo -u ci mkdir -p /github/home/.local/bin && \ + sudo chown -R ci:ci /github/home RUN sudo pip3 install GitPython gql humanize marshmallow-dataclass requests requests_toolbelt prometheus_client --break-system-packages @@ -25,4 +27,6 @@ RUN whoami && \ ENV COURSIER_CACHE=/cache/coursier +COPY target/LICENSE . + WORKDIR /github/home/project diff --git a/cluster/images/splice-test-ci/local.mk b/cluster/images/splice-test-ci/local.mk index 324d11d1..52006328 100644 --- a/cluster/images/splice-test-ci/local.mk +++ b/cluster/images/splice-test-ci/local.mk @@ -3,3 +3,7 @@ dir := $(call current_dir) +$(dir)/$(docker-build): $(dir)/target/LICENSE + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-test-cometbft/Dockerfile b/cluster/images/splice-test-cometbft/Dockerfile index 1db57f38..ba7aafef 100644 --- a/cluster/images/splice-test-cometbft/Dockerfile +++ b/cluster/images/splice-test-cometbft/Dockerfile @@ -7,4 +7,6 @@ COPY configs / ENV sv_idx=0 +COPY target/LICENSE . 
+ ENTRYPOINT ["sh", "-c", "cometbft-canton-network start --home /sv${sv_idx}" ] diff --git a/cluster/images/splice-test-cometbft/local.mk b/cluster/images/splice-test-cometbft/local.mk index a25da585..787f0cb2 100644 --- a/cluster/images/splice-test-cometbft/local.mk +++ b/cluster/images/splice-test-cometbft/local.mk @@ -3,7 +3,7 @@ dir := $(call current_dir) -$(dir)/$(docker-build): $(dir)/configs +$(dir)/$(docker-build): $(dir)/configs $(dir)/target/LICENSE $(dir)/clean: $(dir)/clean-configs @@ -12,3 +12,6 @@ $(dir)/clean-configs: $(dir)/configs $(dir)/configs: ${REPO_ROOT}/apps/sv/src/test/resources/cometbft ${REPO_ROOT}/cluster/images/splice-test-cometbft/copy-configs.sh $< $@ + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-test-docker-runner/Dockerfile b/cluster/images/splice-test-docker-runner/Dockerfile new file mode 100644 index 00000000..a5c6904f --- /dev/null +++ b/cluster/images/splice-test-docker-runner/Dockerfile @@ -0,0 +1,16 @@ +# Note that we don't currently support arm64 runners, so we build this only for amd64 +FROM --platform=$BUILDPLATFORM ghcr.io/actions/actions-runner:2.321.0 + +# TODO(#15988): can we reduce duplication between this and splice-test-ci ? + +RUN sudo apt-get update && \ + sudo apt-get install -y sudo git curl xz-utils pigz rsync jq unzip python3-pip moreutils && \ + sudo rm -rf /var/lib/apt/lists/* + +RUN sudo pip3 install GitPython gql humanize marshmallow-dataclass requests requests_toolbelt prometheus_client + +RUN sudo ln -s /usr/bin/python3 /usr/bin/python + +ENV COURSIER_CACHE=/cache/coursier + +COPY target/LICENSE . diff --git a/cluster/images/splice-test-docker-runner/local.mk b/cluster/images/splice-test-docker-runner/local.mk new file mode 100644 index 00000000..4a6b720e --- /dev/null +++ b/cluster/images/splice-test-docker-runner/local.mk @@ -0,0 +1,10 @@ +# Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 + +dir := $(call current_dir) + +$(dir)/$(docker-build): $(dir)/target/LICENSE + + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-test-postgres/Dockerfile b/cluster/images/splice-test-postgres/Dockerfile index b52f7ec7..2f31e28d 100644 --- a/cluster/images/splice-test-postgres/Dockerfile +++ b/cluster/images/splice-test-postgres/Dockerfile @@ -1,5 +1,6 @@ FROM postgres:14 COPY cmd.sh /usr/local/bin/ +COPY target/LICENSE . CMD ["cmd.sh"] diff --git a/cluster/images/splice-test-postgres/local.mk b/cluster/images/splice-test-postgres/local.mk index 324d11d1..52006328 100644 --- a/cluster/images/splice-test-postgres/local.mk +++ b/cluster/images/splice-test-postgres/local.mk @@ -3,3 +3,7 @@ dir := $(call current_dir) +$(dir)/$(docker-build): $(dir)/target/LICENSE + +$(dir)/target/LICENSE: LICENSE + cp $< $@ diff --git a/cluster/images/splice-test-temp-runner-hook/Dockerfile b/cluster/images/splice-test-temp-runner-hook/Dockerfile index 551dd358..de3c5dc8 100644 --- a/cluster/images/splice-test-temp-runner-hook/Dockerfile +++ b/cluster/images/splice-test-temp-runner-hook/Dockerfile @@ -1,3 +1,4 @@ FROM ghcr.io/actions/actions-runner:latest COPY index.js /home/runner/k8s/index.js +COPY target/LICENSE . 
diff --git a/cluster/images/splice-test-temp-runner-hook/local.mk b/cluster/images/splice-test-temp-runner-hook/local.mk
index 324d11d1..52006328 100644
--- a/cluster/images/splice-test-temp-runner-hook/local.mk
+++ b/cluster/images/splice-test-temp-runner-hook/local.mk
@@ -3,3 +3,7 @@
 
 dir := $(call current_dir)
 
+$(dir)/$(docker-build): $(dir)/target/LICENSE
+
+$(dir)/target/LICENSE: LICENSE
+	cp $< $@
diff --git a/docs/livepreview.sh b/docs/livepreview.sh
index c8c27130..156515db 100755
--- a/docs/livepreview.sh
+++ b/docs/livepreview.sh
@@ -9,4 +9,4 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
 (cd "$REPO_ROOT"; sbt --batch damlBuild)
 ./gen-daml-docs.sh
 
-VERSION="live-preview-build-$(date)" sphinx-autobuild src html/html
+VERSION="live-preview-build-$(date)" sphinx-autobuild src html/html -D todo_include_todos=1
diff --git a/docs/src/app_dev/index.rst b/docs/src/app_dev/index.rst
index 02fd41b4..bd2982b3 100644
--- a/docs/src/app_dev/index.rst
+++ b/docs/src/app_dev/index.rst
@@ -3,10 +3,29 @@
 .. SPDX-License-Identifier: Apache-2.0
 
-Daml and Canton
-===============
 
-.. list-table:: Version Information
+
+Overview
+========
+
+.. todo:: add section on deployment topology and hardware requirements
+
+   - add overview over types of apps
+   - add overview over types of APIs and which apps need what
+   - for Daml apps
+
+     - explain app provider and app user nodes
+     - refer to the TSA training for the in-depth explanation of building Daml apps
+
+
+.. todo:: split into validator/wallet api, scan api, daml, ledger API
+.. todo:: add section on testing including spinning up localnet
+.. todo:: add section on deployment for app devs, e.g., DAR uploads
+
+Version Information
+===================
+
+.. list-table::
    :header-rows: 0
 
    * - Canton version used for validator and SV nodes
@@ -16,11 +35,33 @@
    * - Daml SDK version used for Java and TS codegens
      - |daml_sdk_tooling_version|
 
+Testing
+=======
+
+.. toctree::
+
+   localnet
+
 API Reference
 =============
 
+.. todo::
+
+   add overview over each API type, e.g., overview over scan
+
+.. todo::
+
+   Add overview of how to integrate with CC at the Daml level
+
+   - use the token standard
+   - mention the ``AppPaymentRequestFlow`` as deprecated
+   - clearly mark the splice subscription API as deprecated
+
+   Where possible refer to splice Daml code as the primary source; consider adding Daml docs where they are missing for this to work.
+
+
 .. toctree::
 
-   daml
    splice_app_apis/index
+   daml
diff --git a/docs/src/app_dev/localnet.rst b/docs/src/app_dev/localnet.rst
new file mode 100644
index 00000000..ef87376b
--- /dev/null
+++ b/docs/src/app_dev/localnet.rst
@@ -0,0 +1,49 @@
+..
+   Copyright (c) 2024 Digital Asset (Switzerland) GmbH and/or its affiliates. All rights reserved.
+..
+   SPDX-License-Identifier: Apache-2.0
+
+.. _compose_sv:
+
+Docker-Compose Based Deployment of a Super Validator Node
+---------------------------------------------------------
+
+This section describes how to deploy a standalone Super Validator node on a local machine
+using docker-compose. This is useful for app developers who want a complete standalone
+environment to develop against.
+
+If you have already deployed a validator against an existing network, you will need to
+tear it down and wipe all its data first, as a validator cannot be moved between networks.
+To do that, stop the validator with ``./stop.sh`` from the ``compose/validator`` directory,
+and wipe out all its data with ``docker volume rm compose_postgres-splice``.
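+
+Collected into a single sequence (assuming the same ``splice-node/docker-compose``
+checkout layout that the SV instructions below use), that teardown is:
+
+.. code-block:: bash
+
+   cd splice-node/docker-compose/validator
+   ./stop.sh
+   docker volume rm compose_postgres-splice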
+
+Now you can spin up a docker-compose based Super Validator as follows:
+
+.. code-block:: bash
+
+   cd splice-node/docker-compose/sv
+   ./start.sh -w
+
+It will take a few minutes for the SV to be ready, after which you can use it to onboard a new
+validator.
+
+This is quite similar to the steps for spinning up a validator that were described above.
+First, fetch a new onboarding secret from the SV:
+
+.. code-block:: bash
+
+   curl -X POST http://sv.localhost:8080/api/sv/v0/devnet/onboard/validator/prepare
+
+Now you can onboard a new validator to the SV:
+
+.. code-block:: bash
+
+   cd ../validator
+   ./start.sh -o "<onboarding_secret>" -p <party_hint> -l -w
+
+Note that ``-l`` automatically configures the validator with the configuration required
+for it to use the docker-compose SV created above.
+
+To tear everything down, run ``./stop.sh`` from both the ``compose/validator`` and ``compose/sv``
+directories. As above, stopping alone retains the data for reuse. In order to completely wipe out
+the network's and validator's data, also run ``docker volume rm splice-validator_postgres-splice splice-sv_postgres-splice-sv``.
diff --git a/docs/src/conf.py b/docs/src/conf.py
index c74cb105..c4470afc 100644
--- a/docs/src/conf.py
+++ b/docs/src/conf.py
@@ -62,9 +62,6 @@
 }
 
-todo_include_todos = True
-
-
 # Configure the sphinx copy-button plugin as per
 # https://sphinx-copybutton.readthedocs.io/en/latest/use.html#strip-and-configure-input-prompts-for-code-cells
 copybutton_prompt_text = "@ "
@@ -117,6 +114,9 @@
     sys.exit(1)
 
 chart_version = version
+# TODO(#17226): Make the helm_repo_prefix also correct for snapshots (not an OCI one). For this case we'll also need to:
+#   - put somewhere the "helm repo add" and "helm repo update" commands.
+#   - put somewhere the docker login command to jfrog for docker-compose.
 if re.match(r"^[0-9]+.[0-9]+.[0-9]+$", version):
     # For releases, we download artifacts from GitHub Releases
     download_url = f"https://github.com/digital-asset/decentralized-canton-sync/releases/download/v{version}"
@@ -148,5 +148,5 @@
 .. |bundle_download_link| replace:: :raw-html:`Download Bundle`
 .. |openapi_download_link| replace:: :raw-html:`Download OpenAPI specs`
-.. |canton_download_link| replace:: :raw-html:`Download Canton enterprise`
+.. 
|helm_repo_prefix| replace:: oci://ghcr.io/digital-asset/decentralized-canton-sync/helm """ diff --git a/nix/canton-sources.json b/nix/canton-sources.json index b16a7be8..66d08d45 100644 --- a/nix/canton-sources.json +++ b/nix/canton-sources.json @@ -1,5 +1,5 @@ { - "version": "3.2.0-snapshot.20241217.14528.0.v538e5faf", + "version": "3.2.0-snapshot.20250122.14536.0.vf1523ca8", "tooling_sdk_version": "3.1.0-snapshot.20240717.13187.0.va47ab77f", - "sha256": "sha256:0ckly4dblx36rdgh6nby96cyzarx7j5dxs3ln90770pi2a9q18b7" + "sha256": "sha256:0n64829fzyrvjhk3avz2b5kz0fbrci4ijfyrni5vsc4s3gjjwbig" } diff --git a/nix/cometbft-driver-sources.json b/nix/cometbft-driver-sources.json index 01ecb9f6..76c4ffbc 100644 --- a/nix/cometbft-driver-sources.json +++ b/nix/cometbft-driver-sources.json @@ -1,4 +1,4 @@ { - "version": "3.2.0-snapshot.20241219.14529.0.v2abb8b00", - "sha256": "03wjgs0hfxxz4xgqlqjc5y8g24h6fqjfrksk2x5rwi0cwyrsrapi" + "version": "3.2.0-snapshot.20250106.14530.0.vc5fbb05f-stable-20250121", + "sha256": "1m0i6j6h82mi8xynvi9fqwwq91mqansgbh1yh7yczxsqwhfqx44s" } diff --git a/nix/shell.nix b/nix/shell.nix index 11593e63..357bbb0c 100644 --- a/nix/shell.nix +++ b/nix/shell.nix @@ -15,6 +15,7 @@ in pkgs.mkShell { # NOTE: please keep this list sorted for an easy overview and to avoid merge noise. istioctl + actionlint ammonite auth0-cli bc @@ -28,6 +29,7 @@ in pkgs.mkShell { getopt gh git + git-search-replace (google-cloud-sdk.withExtraComponents ([google-cloud-sdk.components.gke-gcloud-auth-plugin ])) grpcurl daml2js @@ -76,7 +78,6 @@ in pkgs.mkShell { python3Packages.requests_toolbelt python3Packages.sphinx_rtd_theme python3Packages.sphinx-copybutton - git-search-replace python3.pkgs.sphinx-reredirects ripgrep rsync @@ -84,6 +85,7 @@ in pkgs.mkShell { scala_2_13 selenium-server-standalone shellcheck + skopeo sphinx sphinx-lint tmux diff --git a/project/ignore-patterns/canton_log.ignore.txt b/project/ignore-patterns/canton_log.ignore.txt index 1ac1b45a..fde662c2 100644 --- a/project/ignore-patterns/canton_log.ignore.txt +++ b/project/ignore-patterns/canton_log.ignore.txt @@ -108,4 +108,8 @@ Failed to send commitment message batch for period CommitmentPeriod.*RequestRefu Instrument rpc\.server\.duration has exceeded the maximum allowed cardinality Too many log messages detected + +# TODO(#17081) Figure out why this happens +Thread starvation or clock leap detected + # Make sure to have a trailing newline diff --git a/project/ignore-patterns/canton_network_test_log.ignore.txt b/project/ignore-patterns/canton_network_test_log.ignore.txt index d38e7911..0e3a806b 100644 --- a/project/ignore-patterns/canton_network_test_log.ignore.txt +++ b/project/ignore-patterns/canton_network_test_log.ignore.txt @@ -104,4 +104,7 @@ Cannot read contract.*AssignedContract, it is in flight.*SoftDomainMigrationInte # TODO(#13301) This is BftScanConnection.refreshAction failing to fetch the scan list while the DsoRules contract is in flight SoftDomainMigrationIntegrationTest.*The global domain global-domain-new.* is not present in the scans response.*getPeerScansFromStore +# internal selenium error. 
Potentially intermittent +Exception managing firefox: error sending request for url .* http2 error: stream error received: refused stream before processing any application + # Make sure to have a trailing newline diff --git a/project/ignore-patterns/sbt-output.ignore.txt b/project/ignore-patterns/sbt-output.ignore.txt index efeb420f..0ecf84be 100644 --- a/project/ignore-patterns/sbt-output.ignore.txt +++ b/project/ignore-patterns/sbt-output.ignore.txt @@ -113,4 +113,7 @@ WARNING: Failed to fetch secret, retrying in 10 seconds .*Condition \[isDefined\("LOG_LAST_ERRORS"\)\] evaluated.* +# This is actually not a warning +Warning: No want/action statements, nothing to do + # Make sure to have a trailing newline
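These ignore files appear to be applied line-by-line to the captured logs by the repo's CI checks; conceptually the check is an inverted grep. A minimal sketch of that idea (not the project's actual checker; it assumes the patterns are extended regexes and that `#` comment lines and blank lines must be skipped):

    # keep only real patterns, then flag any log line not matched by one of them
    grep -vE '^(#|$)' project/ignore-patterns/sbt-output.ignore.txt > /tmp/patterns.txt
    grep -vE -f /tmp/patterns.txt sbt-output.log && echo "unexpected log lines found"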