From 047dafc28dfef120e637c64f7feb2958efb7a66b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Mon, 2 Sep 2024 11:26:13 -0300 Subject: [PATCH 001/116] chore(configs): Update protocol version (#2779) Update protocol version --- core/node/external_proof_integration_api/src/lib.rs | 3 +-- etc/env/file_based/genesis.yaml | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs index 4ad8e2595a01..4355896e2a2e 100644 --- a/core/node/external_proof_integration_api/src/lib.rs +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -4,8 +4,6 @@ mod middleware; mod processor; mod types; -pub use crate::processor::Processor; - use std::net::SocketAddr; use anyhow::Context; @@ -20,6 +18,7 @@ use tokio::sync::watch; use types::{ExternalProof, ProofGenerationDataResponse}; use zksync_basic_types::L1BatchNumber; +pub use crate::processor::Processor; use crate::{ metrics::{CallOutcome, Method}, middleware::MetricsMiddleware, diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 6d7a6ba3c338..220a75944e02 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,7 +1,7 @@ genesis_root: 0xabdb766b18a479a5c783a4b80e12686bc8ea3cc2d8a3050491b701d72370ebb5 genesis_rollup_leaf_index: 54 genesis_batch_commitment: 0x2d00e5f8d77afcebf58a6b82ae56ba967566fe7dfbcb6760319fb0d215d18ffd -genesis_protocol_semantic_version: '0.24.1' +genesis_protocol_semantic_version: '0.24.2' # deprecated genesis_protocol_version: 24 default_aa_hash: 0x01000563374c277a2c1e34659a2a1e87371bb6d852ce142022d497bfb50b9e32 From ec3f0843bc1ac93bf46a1c7340d1f00d0e5a715a Mon Sep 17 00:00:00 2001 From: Danil Date: Mon, 2 Sep 2024 16:27:39 +0200 Subject: [PATCH 002/116] chore(ci): Use artifacts in ci (#2765) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Signed-off-by: Danil Co-authored-by: Manuel Mauro Co-authored-by: Alexander Melnikov Co-authored-by: Rodion Sabodash --- .github/workflows/ci-zk-toolbox-reusable.yml | 193 ++++++------------ core/tests/recovery-test/src/index.ts | 17 +- .../tests/genesis-recovery.test.ts | 9 +- .../tests/snapshot-recovery.test.ts | 7 +- .../tests/revert-and-restart-en.test.ts | 36 +--- .../tests/revert-and-restart.test.ts | 28 +-- core/tests/ts-integration/jest.config.json | 38 ++-- core/tests/ts-integration/package.json | 2 +- .../ts-integration/tests/base-token.test.ts | 3 + .../ts-integration/tests/contracts.test.ts | 42 ++-- etc/utils/src/kill.ts | 19 ++ etc/utils/src/logs.ts | 11 + etc/utils/src/server.ts | 23 --- .../src/commands/test/integration.rs | 2 +- 14 files changed, 178 insertions(+), 252 deletions(-) create mode 100644 etc/utils/src/kill.ts create mode 100644 etc/utils/src/logs.ts delete mode 100644 etc/utils/src/server.ts diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 5e9402b69ea0..21ffdc0523c9 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -33,6 +33,32 @@ jobs: - name: Build zk_toolbox run: ci_run bash -c "./bin/zkt" + - name: Create log directories + run: | + SERVER_LOGS_DIR=logs/server + INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests + INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en + SNAPSHOT_RECOVERY_LOGS_DIR=logs/integration_tests/en + GENESIS_RECOVERY_LOGS_DIR=logs/integration_tests/en + EXTERNAL_NODE_LOGS_DIR=logs/external_node + REVERT_LOGS_DIR=logs/revert + + mkdir -p $SERVER_LOGS_DIR + mkdir -p $INTEGRATION_TESTS_LOGS_DIR + mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR + mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR + mkdir -p $GENESIS_RECOVERY_LOGS_DIR + mkdir -p $EXTERNAL_NODE_LOGS_DIR + mkdir -p $REVERT_LOGS_DIR + + echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV + echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV + echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV + echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV + echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV + echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV + echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV + - name: Initialize ecosystem run: | ci_run git config --global --add safe.directory /usr/src/zksync @@ -57,7 +83,7 @@ jobs: - name: Create and initialize Validium chain run: | ci_run zk_inception chain create \ - --chain-name chain_validium \ + --chain-name validium \ --chain-id sequential \ --prover-mode no-proofs \ --wallet-creation localhost \ @@ -76,12 +102,12 @@ jobs: --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --prover-db-name=zksync_prover_localhost_validium \ --port-offset 2000 \ - --chain chain_validium + --chain validium - name: Create and initialize chain with Custom Token run: | ci_run zk_inception chain create \ - --chain-name chain_custom_token \ + --chain-name custom_token \ --chain-id sequential \ --prover-mode no-proofs \ --wallet-creation localhost \ @@ -100,7 +126,7 @@ jobs: --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --prover-db-name=zksync_prover_localhost_custom_token \ --port-offset 3000 \ - --chain chain_custom_token + --chain custom_token - name: Build test dependencies run: | @@ -108,20 +134,20 @@ jobs: - name: Run servers run: | - ci_run zk_inception server 
--ignore-prerequisites --chain era &> server_rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain chain_validium &> server_validium.log & - ci_run zk_inception server --ignore-prerequisites --chain chain_custom_token &> server_custom_token.log & + ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & ci_run sleep 5 - name: Run integration tests run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> integration_rollup.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & PID1=$! - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain chain_validium &> integration_validium.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & PID2=$! - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain chain_custom_token &> integration_custom_token.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & PID3=$! wait $PID1 @@ -135,22 +161,23 @@ jobs: ci_run zk_inception external-node init --ignore-prerequisites --chain era ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain chain_validium - ci_run zk_inception external-node init --ignore-prerequisites --chain chain_validium + --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium + ci_run zk_inception external-node init --ignore-prerequisites --chain validium ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain chain_custom_token - ci_run zk_inception external-node init --ignore-prerequisites --chain chain_custom_token + --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token + ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token - name: Run recovery tests (from snapshot) run: | - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> recovery_snap_rollup.log & + + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & PID1=$! - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain chain_validium &> recovery_snap_validium.log & + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//validium.log & PID2=$! 
- ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain chain_custom_token &> recovery_snap_custom_token.log & + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//custom_token.log & PID3=$! wait $PID1 @@ -159,13 +186,13 @@ jobs: - name: Run recovery tests (from genesis) run: | - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> recovery_gen_rollup.log & + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & PID1=$! - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain chain_validium &> recovery_gen_validium.log & + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & PID2=$! - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain chain_custom_token &> recovery_gen_custom_token.log & + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! wait $PID1 @@ -174,38 +201,38 @@ jobs: - name: Run external node server run: | - ci_run zk_inception external-node run --ignore-prerequisites --chain era &> external_node_rollup.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain chain_validium &> external_node_validium.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain chain_custom_token &> external_node_custom_token.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & ci_run sleep 5 - name: Run integration tests en run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> integration_en_rollup.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & PID1=$! - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain chain_validium &> integration_en_validium.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & PID2=$! - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain chain_custom_token &> integration_en_cusotm_token.log & + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & PID3=$! 
wait $PID1 wait $PID2 wait $PID3 - - name: Run revert tests (external node) + - name: Run revert tests run: | - ci_run killall -INT zksync_server - ci_run killall -INT zksync_external_node + ci_run killall -INT zksync_server || true + ci_run killall -INT zksync_external_node || true - ci_run zk_supervisor test revert --no-deps --no-kill --ignore-prerequisites --chain chain_validium &> revert_validium.log & + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & PID1=$! - ci_run zk_supervisor test revert --no-deps --no-kill --external-node --ignore-prerequisites --chain era &> revert_rollup.log & + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & PID2=$! - ci_run zk_supervisor test revert --no-deps --no-kill --external-node --ignore-prerequisites --chain chain_custom_token &> revert_custom_token.log & + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & PID3=$! wait $PID1 @@ -213,107 +240,17 @@ jobs: wait $PID3 - # Upgrade tests should run last, because as soon as they + # Upgrade tests should run last, because as soon as they # finish the bootloader will be different # TODO make upgrade tests safe to run multiple times - name: Run upgrade test run: | ci_run zk_supervisor test upgrade --no-deps --chain era - - name: Show server_rollup.log logs - if: always() - run: ci_run cat server_rollup.log || true - - - name: Show server_validium.log logs - if: always() - run: ci_run cat server_validium.log || true - - - name: Show server_custom_token.log logs - if: always() - run: ci_run cat server_custom_token.log || true - - - name: Show external_node_rollup.log logs - if: always() - run: ci_run cat external_node_rollup.log || true - - - name: Show external_node_validium.log logs - if: always() - run: ci_run cat external_node_validium.log || true - - - name: Show external_node_custom_token.log logs - if: always() - run: ci_run cat external_node_custom_token.log || true - - - name: Show integration_rollup.log logs - if: always() - run: ci_run cat integration_rollup.log || true - - - name: Show integration_validium.log logs - if: always() - run: ci_run cat integration_validium.log || true - - - name: Show integration_custom_token.log logs - if: always() - run: ci_run cat integration_custom_token.log || true - - - name: Show recovery_snap_rollup.log logs - if: always() - run: ci_run cat recovery_snap_rollup.log || true - - - name: Show recovery_snap_validium.log logs - if: always() - run: ci_run cat recovery_snap_validium.log || true - - name: Show recovery_snap_custom_token.log logs + - name: Upload logs + uses: actions/upload-artifact@v4 if: always() - run: ci_run cat recovery_snap_custom_token.log || true - - - name: Show recovery_gen_rollup.log logs - if: always() - run: ci_run cat recovery_gen_rollup.log || true - - - name: Show recovery_gen_validium.log logs - if: always() - run: ci_run cat recovery_gen_validium.log || true - - - name: Show recovery_gen_custom_token.log logs - if: always() - run: ci_run cat recovery_gen_custom_token.log || true - - - name: Show integration_en_rollup.log logs - if: always() - run: ci_run cat integration_en_rollup.log || true - - - name: Show integration_en_validium.log logs - if: always() - run: ci_run cat integration_en_validium.log || true - - - name: Show 
integration_en_custom_token.log logs
-        if: always()
-        run: ci_run cat integration_en_custom_token.log || true
-
-      - name: Show revert_rollup.log logs
-        if: always()
-        run: ci_run cat revert_rollup.log || true
-
-      - name: Show revert_validium.log logs
-        if: always()
-        run: ci_run cat revert_validium.log || true
-
-      - name: Show revert_custom_token.log logs
-        if: always()
-        run: ci_run cat revert_custom_token.log || true
-
-      - name: Show revert_main.log logs
-        if: always()
-        run: |
-          ci_run cat core/tests/revert-test/era_revert_main.log || true
-          ci_run cat core/tests/revert-test/chain_validium_revert_main.log || true
-          ci_run cat core/tests/revert-test/chain_custom_token_revert_main.log || true
-
-      - name: Show revert_ext.log logs
-        if: always()
-        run: |
-          ci_run cat core/tests/revert-test/era_revert_ext.log || true
-          ci_run cat core/tests/revert-test/chain_validium_revert_ext.log || true
-          ci_run cat core/tests/revert-test/chain_validium_custom_token_ext.log || true
+      - name: Upload logs
+        uses: actions/upload-artifact@v4
+        if: always()
+        with:
+          name: logs
+          path: logs
diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts
index be74c010ed36..6599e7c5d298 100644
--- a/core/tests/recovery-test/src/index.ts
+++ b/core/tests/recovery-test/src/index.ts
@@ -11,6 +11,7 @@ import * as ethers from 'ethers';
 import path from 'node:path';
 import { expect } from 'chai';
 import { runExternalNodeInBackground } from './utils';
+import { killPidWithAllChilds } from 'utils/build/kill';

 export interface Health {
     readonly status: string;
@@ -159,19 +160,7 @@ export class NodeProcess {
             signalNumber = 15;
         }
         try {
-            let childs = [this.childProcess.pid];
-            while (true) {
-                try {
-                    let child = childs.at(-1);
-                    childs.push(+(await promisify(exec)(`pgrep -P ${child}`)).stdout);
-                } catch (e) {
-                    break;
-                }
-            }
-            // We always run the test using additional tools, that means we have to kill not the main process, but the child process
-            for (let i = childs.length - 1; i >= 0; i--) {
-                await promisify(exec)(`kill -${signalNumber} ${childs[i]}`);
-            }
+            await killPidWithAllChilds(this.childProcess.pid!, signalNumber);
         } catch (err) {
             const typedErr = err as ChildProcessError;
             if (typedErr.code === 1) {
@@ -190,7 +179,7 @@ export class NodeProcess {
         useZkInception?: boolean,
         chain?: string
     ) {
-        const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'w') : logsFile;
+        const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'a') : logsFile;
         let childProcess = runExternalNodeInBackground({
             components: [components],
diff --git a/core/tests/recovery-test/tests/genesis-recovery.test.ts b/core/tests/recovery-test/tests/genesis-recovery.test.ts
index 2a9a8982204c..a43f5a9e92be 100644
--- a/core/tests/recovery-test/tests/genesis-recovery.test.ts
+++ b/core/tests/recovery-test/tests/genesis-recovery.test.ts
@@ -4,11 +4,18 @@ import { ethers } from 'ethers';
 import { NodeProcess, dropNodeData, getExternalNodeHealth, NodeComponents, sleep, FundedWallet } from '../src';
 import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs';
+
 import path from 'path';

 const pathToHome = path.join(__dirname, '../../../..');
 const fileConfig = shouldLoadConfigFromFile();

+import { logsTestPath } from 'utils/build/logs';
+
+async function logsPath(name: string): Promise<string> {
+    return await logsTestPath(fileConfig.chain, 'logs/recovery/genesis', name);
+}
+
 /**
  * Tests recovery of an external node from scratch.
 *
@@ -111,7 +118,7 @@ describe('genesis recovery', () => {
     step('initialize external node w/o a tree', async () => {
         externalNodeProcess = await NodeProcess.spawn(
             externalNodeEnv,
-            'genesis-recovery.log',
+            await logsPath('external-node.log'),
             pathToHome,
             NodeComponents.WITH_TREE_FETCHER_AND_NO_TREE,
             fileConfig.loadFromFile,
diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts
index b1b68db42bed..cadf146c5226 100644
--- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts
+++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts
@@ -23,6 +23,7 @@ import {
     setTreeRecoveryParallelPersistenceBuffer
 } from './utils';
 import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs';
+import { logsTestPath } from 'utils/build/logs';

 const pathToHome = path.join(__dirname, '../../../..');
 const fileConfig = shouldLoadConfigFromFile();
@@ -59,6 +60,10 @@ interface TokenInfo {
     readonly l2_address: string;
 }

+async function logsPath(name: string): Promise<string> {
+    return await logsTestPath(fileConfig.chain, 'logs/recovery/snapshot/', name);
+}
+
 /**
  * Tests snapshot recovery and node state pruning.
  *
@@ -240,7 +245,7 @@ describe('snapshot recovery', () => {
     step('initialize external node', async () => {
         externalNodeProcess = await NodeProcess.spawn(
             externalNodeEnv,
-            'snapshot-recovery.log',
+            await logsPath('external_node.log'),
             pathToHome,
             NodeComponents.STANDARD,
             fileConfig.loadFromFile,
diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
index bd5dca6d270b..e1694418db14 100644
--- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts
+++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts
@@ -20,6 +20,8 @@ import {
 } from 'utils/build/file-configs';
 import path from 'path';
 import { ChildProcessWithoutNullStreams } from 'child_process';
+import { logsTestPath } from 'utils/build/logs';
+import { killPidWithAllChilds } from 'utils/build/kill';

 const pathToHome = path.join(__dirname, '../../../..');
 const fileConfig = shouldLoadConfigFromFile();
@@ -50,8 +52,10 @@ if (deploymentMode == 'Validium') {
     mainEnv = process.env.IN_DOCKER ? 'docker' : 'dev';
     extEnv = process.env.IN_DOCKER ? 
'ext-node-docker' : 'ext-node';
 }
-const mainLogsPath: string = 'revert_main.log';
-const extLogsPath: string = 'revert_ext.log';
+
+async function logsPath(name: string): Promise<string> {
+    return await logsTestPath(fileConfig.chain, 'logs/revert/en', name);
+}

 interface SuggestedValues {
     lastExecutedL1BatchNumber: bigint;
@@ -154,15 +158,7 @@ class MainNode {

     public async terminate() {
         try {
-            let child = this.proc.pid;
-            while (true) {
-                try {
-                    child = +(await utils.exec(`pgrep -P ${child}`)).stdout;
-                } catch (e) {
-                    break;
-                }
-            }
-            await utils.exec(`kill -9 ${child}`);
+            await killPidWithAllChilds(this.proc.pid!, 9);
         } catch (err) {
             console.log(`ignored error: ${err}`);
         }
@@ -239,15 +235,7 @@ class ExtNode {

     public async terminate() {
         try {
-            let child = this.proc.pid;
-            while (true) {
-                try {
-                    child = +(await utils.exec(`pgrep -P ${child}`)).stdout;
-                } catch (e) {
-                    break;
-                }
-            }
-            await utils.exec(`kill -9 ${child}`);
+            await killPidWithAllChilds(this.proc.pid!, 9);
         } catch (err) {
             console.log(`ignored error: ${err}`);
         }
@@ -347,8 +335,6 @@ describe('Block reverting test', function () {
             baseTokenAddress = contractsConfig.l1.base_token_addr;
             enEthClientUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url;
             operatorAddress = walletsConfig.operator.address;
-            mainLogs = fs.createWriteStream(`${fileConfig.chain}_${mainLogsPath}`, { flags: 'a' });
-            extLogs = fs.createWriteStream(`${fileConfig.chain}_${extLogsPath}`, { flags: 'a' });
         } else {
             let env = fetchEnv(mainEnv);
             ethClientWeb3Url = env.ETH_CLIENT_WEB3_URL;
@@ -357,10 +343,9 @@ describe('Block reverting test', function () {
             enEthClientUrl = `http://127.0.0.1:${env.EN_HTTP_PORT}`;
             // TODO use env variable for this?
             operatorAddress = '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7';
-            mainLogs = fs.createWriteStream(mainLogsPath, { flags: 'a' });
-            extLogs = fs.createWriteStream(extLogsPath, { flags: 'a' });
         }
-
+        mainLogs = fs.createWriteStream(await logsPath('server.log'), { flags: 'a' });
+        extLogs = fs.createWriteStream(await logsPath('external_node.log'), { flags: 'a' });
         if (process.env.SKIP_COMPILATION !== 'true' && !fileConfig.loadFromFile) {
             compileBinaries();
         }
@@ -421,7 +406,6 @@ describe('Block reverting test', function () {
         }

         console.log('Restart the main node with L1 batch execution disabled.');
-        await mainNode.terminate();
         await killServerAndWaitForShutdown(mainNode);
         mainNode = await MainNode.spawn(
             mainLogs,
diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts
index 17669bca4f13..a01788284d2a 100644
--- a/core/tests/revert-test/tests/revert-and-restart.test.ts
+++ b/core/tests/revert-test/tests/revert-and-restart.test.ts
@@ -5,12 +5,12 @@ import { Tester } from './tester';
 import * as zksync from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { expect } from 'chai';
-import fs from 'fs';
 import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain';
 import path from 'path';
 import { ChildProcessWithoutNullStreams } from 'child_process';
-
-const fileConfig = shouldLoadConfigFromFile();
+import fs from 'node:fs/promises';
+import { logsTestPath } from 'utils/build/logs';
+import { killPidWithAllChilds } from 'utils/build/kill';

 // Parses output of "print-suggested-values" command of the revert block tool.
function parseSuggestedValues(suggestedValuesString: string): {
@@ -48,16 +48,7 @@ async function killServerAndWaitForShutdown(tester: Tester, serverProcess?: Chil
         await utils.exec('killall -9 zksync_server').catch(ignoreError);
         return;
     }
-
-    let child = serverProcess.pid;
-    while (true) {
-        try {
-            child = +(await utils.exec(`pgrep -P ${child}`)).stdout;
-        } catch (e) {
-            break;
-        }
-    }
-    await utils.exec(`kill -9 ${child}`);
+    await killPidWithAllChilds(serverProcess.pid!, 9).catch(ignoreError);
     // Wait until it's really stopped.
     let iter = 0;
     while (iter < 30) {
@@ -79,21 +70,24 @@ function ignoreError(_err: any, context?: string) {
     console.info(message);
 }

+const fileConfig = shouldLoadConfigFromFile();
 const depositAmount = ethers.parseEther('0.001');

+async function logsPath(name: string): Promise<string> {
+    return await logsTestPath(fileConfig.chain, 'logs/revert/', name);
+}
+
 describe('Block reverting test', function () {
     let tester: Tester;
     let alice: zksync.Wallet;
     let mainContract: IZkSyncHyperchain;
     let blocksCommittedBeforeRevert: bigint;
-    let logs: fs.WriteStream;
+    let logs: fs.FileHandle;
     let operatorAddress: string;
     let ethClientWeb3Url: string;
     let apiWeb3JsonRpcHttpUrl: string;
     let serverProcess: ChildProcessWithoutNullStreams | undefined;
-    const pathToHome = path.join(__dirname, '../../../..');
-
     const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL;
     const enableConsensus = process.env.ENABLE_CONSENSUS == 'true';
     let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
@@ -141,7 +135,6 @@ describe('Block reverting test', function () {
         // Create test wallets
         tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress);
         alice = tester.emptyWallet();
-        logs = fs.createWriteStream(`revert_${fileConfig.chain}.log`, { flags: 'a' });
     });

     step('run server and execute some transactions', async () => {
@@ -151,6 +144,7 @@ describe('Block reverting test', function () {
         }

         // Run server in background.
+        logs = await fs.open(await logsPath('server.log'), 'a');
         serverProcess = runServerInBackground({
             components: [components],
             stdio: ['ignore', logs, logs],
diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json
index cf23d389d0ec..8fa5ea1eb721 100644
--- a/core/tests/ts-integration/jest.config.json
+++ b/core/tests/ts-integration/jest.config.json
@@ -1,21 +1,21 @@
 {
-    "maxWorkers": "70%",
-    "reporters": [
-        "default",
-        "github-actions"
-    ],
-    "transform": {
-        "^.+\\.ts?$": "ts-jest"
-    },
-    "//": "!!! Do not increase the test timeout blindly!!!",
-    "//": "Timeout is set to match ~4 L1 operations with 10 blocks confirmation",
-    "//": "If you need bigger timeout, consider either disabling the test outside of fast mode or increasing timeout on a single test",
-    "//": "If this value would be too big, it may cause tests on stage to get stuck for too long",
-    "testTimeout": 605000,
-    "globalSetup": "<rootDir>/src/jest-setup/global-setup.ts",
-    "globalTeardown": "<rootDir>/src/jest-setup/global-teardown.ts",
-    "setupFilesAfterEnv": [
-        "<rootDir>/src/jest-setup/add-matchers.ts"
-    ],
-    "slowTestThreshold": 120
+  "maxWorkers": "70%",
+  "reporters": [
+    "default",
+    "github-actions"
+  ],
+  "transform": {
+    "^.+\\.ts?$": "ts-jest"
+  },
+  "//": "!!! 
Do not increase the test timeout blindly!!!",
+  "//": "Timeout is set to match ~4 L1 operations with 10 blocks confirmation",
+  "//": "If you need bigger timeout, consider either disabling the test outside of fast mode or increasing timeout on a single test",
+  "//": "If this value would be too big, it may cause tests on stage to get stuck for too long",
+  "testTimeout": 605000,
+  "globalSetup": "<rootDir>/src/jest-setup/global-setup.ts",
+  "globalTeardown": "<rootDir>/src/jest-setup/global-teardown.ts",
+  "setupFilesAfterEnv": [
+    "<rootDir>/src/jest-setup/add-matchers.ts"
+  ],
+  "slowTestThreshold": 120
 }
diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json
index 3f92cecb4a53..0e9b863d8e16 100644
--- a/core/tests/ts-integration/package.json
+++ b/core/tests/ts-integration/package.json
@@ -4,7 +4,7 @@
     "license": "MIT",
     "private": true,
     "scripts": {
-        "test": "zk f jest --detectOpenHandles --verbose --testTimeout 60000",
+        "test": "zk f jest --forceExit --verbose --testTimeout 120000",
         "long-running-test": "zk f jest",
         "fee-test": "RUN_FEE_TEST=1 zk f jest -- fees.test.ts",
         "api-test": "zk f jest -- api/web3.test.ts api/debug.test.ts",
diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts
index adb32def5b07..70df1dff9282 100644
--- a/core/tests/ts-integration/tests/base-token.test.ts
+++ b/core/tests/ts-integration/tests/base-token.test.ts
@@ -9,6 +9,9 @@ import * as zksync from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { scaledGasPrice } from '../src/helpers';

+const SECONDS = 1000;
+jest.setTimeout(100 * SECONDS);
+
 describe('base ERC20 contract checks', () => {
     let testMaster: TestMaster;
     let alice: zksync.Wallet;
diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts
index 3b2347244b50..cb1bec35b51b 100644
--- a/core/tests/ts-integration/tests/contracts.test.ts
+++ b/core/tests/ts-integration/tests/contracts.test.ts
@@ -15,6 +15,9 @@ import * as zksync from 'zksync-ethers';
 import * as elliptic from 'elliptic';
 import { RetryProvider } from '../src/retry-provider';

+const SECONDS = 1000;
+jest.setTimeout(300 * SECONDS);
+
 // TODO: Leave only important ones.
 const contracts = {
     counter: getTestContract('Counter'),
@@ -35,8 +38,7 @@ describe('Smart contract behavior checks', () => {

     // Contracts shared in several tests.
     let counterContract: zksync.Contract;
-    // TODO: fix error and uncomment
-    // let expensiveContract: zksync.Contract;
+    let expensiveContract: zksync.Contract;

     beforeAll(() => {
         testMaster = TestMaster.getInstance(__filename);
@@ -72,25 +74,23 @@ describe('Smart contract behavior checks', () => {
         await expect(contract.getFooName()).resolves.toBe('Foo');
     });

-    // TODO: fix and uncomment
-    //
-    // test('Should perform "expensive" contract calls', async () => {
-    //     expensiveContract = await deployContract(alice, contracts.expensive, []);
-    //     // Check that the transaction that is too expensive would be rejected by the API server.
-    //     await expect(expensiveContract.expensive(15000)).toBeRejected();
-    // });
-    //
-    // test('Should perform underpriced "expensive" contract calls', async () => {
-    //     // Check that processable transaction may fail with "out of gas" error.
-    //     // To do so, we estimate gas for arg "1" and supply it to arg "20".
-    //     // This guarantees that transaction won't fail during verification. 
-    //     const lowGasLimit = await expensiveContract.expensive.estimateGas(1);
-    //     await expect(
-    //         expensiveContract.expensive(20, {
-    //             gasLimit: lowGasLimit
-    //         })
-    //     ).toBeReverted();
-    // });
+    test('Should perform "expensive" contract calls', async () => {
+        expensiveContract = await deployContract(alice, contracts.expensive, []);
+        // Check that the transaction that is too expensive would be rejected by the API server.
+        await expect(expensiveContract.expensive(15000)).toBeRejected();
+    });
+
+    test('Should perform underpriced "expensive" contract calls', async () => {
+        // Check that processable transaction may fail with "out of gas" error.
+        // To do so, we estimate gas for arg "1" and supply it to arg "20".
+        // This guarantees that transaction won't fail during verification.
+        const lowGasLimit = await expensiveContract.expensive.estimateGas(1);
+        await expect(
+            expensiveContract.expensive(20, {
+                gasLimit: lowGasLimit
+            })
+        ).toBeReverted();
+    });

     test('Should fail an infinite loop transaction', async () => {
         if (testMaster.isFastMode()) {
diff --git a/etc/utils/src/kill.ts b/etc/utils/src/kill.ts
new file mode 100644
index 000000000000..7fdab85afadd
--- /dev/null
+++ b/etc/utils/src/kill.ts
@@ -0,0 +1,19 @@
+import { promisify } from 'node:util';
+import { exec } from 'node:child_process';
+
+export async function killPidWithAllChilds(pid: number, signalNumber: number) {
+    let childs = [pid];
+    while (true) {
+        try {
+            let child = childs.at(-1);
+            childs.push(+(await promisify(exec)(`pgrep -P ${child}`)).stdout);
+        } catch (e) {
+            break;
+        }
+    }
+    // We always run the test using additional tools, that means we have to kill not the main process, but the child process
+    for (let i = childs.length - 1; i >= 0; i--) {
+        console.log(`kill ${childs[i]}`);
+        await promisify(exec)(`kill -${signalNumber} ${childs[i]}`);
+    }
+}
diff --git a/etc/utils/src/logs.ts b/etc/utils/src/logs.ts
new file mode 100644
index 000000000000..cdb26f5ad1b7
--- /dev/null
+++ b/etc/utils/src/logs.ts
@@ -0,0 +1,11 @@
+import path from 'path';
+import fs from 'node:fs/promises';
+
+const pathToHome = path.join(__dirname, '../../../..');
+
+export async function logsTestPath(chain: string | undefined, relativePath: string, name: string): Promise<string> {
+    chain = chain ? chain! : 'default';
+    let dir = path.join(pathToHome, relativePath, chain);
+    await fs.mkdir(dir, { recursive: true });
+    return path.join(dir, name);
+}
diff --git a/etc/utils/src/server.ts b/etc/utils/src/server.ts
deleted file mode 100644
index 94184f0db9b6..000000000000
--- a/etc/utils/src/server.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-import { background } from '.';
-
-// TODO: change to use `zk_inception` once migration is complete
-const BASE_COMMAND = 'zk_inception server';
-const BASE_COMMAND_WITH_ZK = 'zk server';
-
-export function runServerInBackground({
-    components,
-    stdio,
-    cwd,
-    useZkInception
-}: {
-    components?: string[];
-    stdio: any;
-    cwd?: Parameters<typeof background>[0]['cwd'];
-    useZkInception?: boolean;
-}) {
-    let command = useZkInception ? 
BASE_COMMAND : BASE_COMMAND_WITH_ZK;
-    if (components && components.length > 0) {
-        command += ` --components=${components.join(',')}`;
-    }
-    background({ command, stdio, cwd });
-}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index 8c22fb411f8c..e1ec932ca7f9 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -39,7 +39,7 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
         .init_test_wallet(&ecosystem_config, &chain_config)
         .await?;

-    let mut command = cmd!(shell, "yarn jest --detectOpenHandles --testTimeout 120000")
+    let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 120000")
        .env("CHAIN_NAME", ecosystem_config.current_chain())
        .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);

From 4fdc80636437090f6ebcfa4e2f1eb50edf53631a Mon Sep 17 00:00:00 2001
From: Thomas Knauth 
Date: Mon, 2 Sep 2024 17:56:16 +0200
Subject: [PATCH 003/116] fix(tee-prover): increase retries to reduce spurious alerts (#2776)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Increase retries.

## Why ❔

An alert fires when we hit the maximum number of retries. Retries happen, for example, when a component restarts. Those restarts are transient and harmless. By increasing the number of retries, we reduce the number of false/spurious alerts.

## Checklist

---
 core/lib/dal/src/tee_verifier_input_producer_dal.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs
index 4adee62e7aa6..4a178fd52253 100644
--- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs
+++ b/core/lib/dal/src/tee_verifier_input_producer_dal.rs
@@ -17,7 +17,7 @@ pub struct TeeVerifierInputProducerDal<'a, 'c> {
 }

 /// The amount of attempts to process a job before giving up.
-pub const JOB_MAX_ATTEMPT: i16 = 2;
+pub const JOB_MAX_ATTEMPT: i16 = 5;

 /// Time to wait for job to be processed
 const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60));

From 178b38644f507c5f6d12ba862d0c699e87985dd7 Mon Sep 17 00:00:00 2001
From: Patrick 
Date: Mon, 2 Sep 2024 18:05:27 +0200
Subject: [PATCH 004/116] fix(tee-prover): mitigate panic on redeployments (#2764)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

We experienced a `tee-prover` panic, likely due to the automatic redeployment of the `proof-data-handler` in the `staging` environment. We've been getting `503 Service Unavailable` errors for an extended period when trying to reach http://server-v2-proof-data-handler-internal.stage.matterlabs.corp/tee/proof_input, which resulted in a panic after reaching the retry limit.
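The failure mode is the standard bounded-retry pattern: back off on retriable errors, and give up (taking the whole task down) once the retry budget is spent. A simplified sketch of that behavior (hypothetical names, not the actual `tee_prover` code):

```rust
use std::time::Duration;

// Sketch only: retriable errors back off exponentially (capped at
// `max_backoff`); once `max_retries` is exhausted, the last error is
// propagated to the caller, which is the panic/exit path described above.
fn retry_with_backoff<T, E>(
    max_retries: usize,
    initial_backoff: Duration,
    multiplier: f32,
    max_backoff: Duration,
    mut step: impl FnMut() -> Result<T, E>,
) -> Result<T, E> {
    let mut backoff = initial_backoff;
    let mut attempt = 1;
    loop {
        match step() {
            Ok(value) => return Ok(value),
            // Retry budget exhausted: propagate the error.
            Err(err) if attempt >= max_retries => return Err(err),
            Err(_) => {
                std::thread::sleep(backoff);
                backoff = std::cmp::min(backoff.mul_f32(multiplier), max_backoff);
                attempt += 1;
            }
        }
    }
}
```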
Relevant code causing the panic: https://github.com/matter-labs/zksync-era/blob/8ed086afecfcad30bfda44fc4d29a00beea71cca/core/bin/zksync_tee_prover/src/tee_prover.rs#L201-L203 [Relevant logs](https://grafana.matterlabs.dev/explore?schemaVersion=1&panes=%7B%223ss%22:%7B%22datasource%22:%22cduazndivuosga%22,%22queries%22:%5B%7B%22metrics%22:%5B%7B%22id%22:%221%22,%22type%22:%22logs%22%7D%5D,%22query%22:%22container_name:%5C%22zksync-tee-prover%5C%22%22,%22refId%22:%22A%22,%22datasource%22:%7B%22type%22:%22quickwit-quickwit-datasource%22,%22uid%22:%22cduazndivuosga%22%7D,%22alias%22:%22%22,%22bucketAggs%22:%5B%7B%22type%22:%22date_histogram%22,%22id%22:%222%22,%22settings%22:%7B%22interval%22:%22auto%22%7D,%22field%22:%22%22%7D%5D,%22timeField%22:%22%22%7D%5D,%22range%22:%7B%22from%22:%221724854712742%22,%22to%22:%221724855017388%22%7D%7D%7D&orgId=1). ## Why ❔ To mitigate panics on `proof-data-handler` redeployments. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 2 + core/bin/zksync_tee_prover/Cargo.toml | 3 +- core/bin/zksync_tee_prover/src/config.rs | 35 ++++--- core/bin/zksync_tee_prover/src/main.rs | 9 +- core/bin/zksync_tee_prover/src/tee_prover.rs | 99 ++++++-------------- etc/nix/container-tee_prover.nix | 6 +- 6 files changed, 61 insertions(+), 93 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0350028da7d1..f1dc1a5d3a37 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -5520,6 +5520,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25996b82292a7a57ed3508f052cfff8640d38d32018784acd714758b43da9c8f" dependencies = [ "secp256k1-sys", + "serde", ] [[package]] @@ -9620,6 +9621,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "envy", "reqwest 0.12.5", "secp256k1", "serde", diff --git a/core/bin/zksync_tee_prover/Cargo.toml b/core/bin/zksync_tee_prover/Cargo.toml index 0c89971fd305..85908eebeaaa 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -14,8 +14,9 @@ publish = false [dependencies] anyhow.workspace = true async-trait.workspace = true +envy.workspace = true reqwest.workspace = true -secp256k1.workspace = true +secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true tokio = { workspace = true, features = ["full"] } diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs index 2a77c3752180..5b009e33f25e 100644 --- a/core/bin/zksync_tee_prover/src/config.rs +++ b/core/bin/zksync_tee_prover/src/config.rs @@ -1,12 +1,13 @@ -use std::path::PathBuf; +use std::{path::PathBuf, time::Duration}; use secp256k1::SecretKey; +use serde::Deserialize; use url::Url; use zksync_env_config::FromEnv; use zksync_types::tee_types::TeeType; /// Configuration for the TEE prover. -#[derive(Debug)] +#[derive(Debug, Clone, Deserialize)] pub(crate) struct TeeProverConfig { /// The private key used to sign the proofs. pub signing_key: SecretKey, @@ -16,6 +17,16 @@ pub(crate) struct TeeProverConfig { pub tee_type: TeeType, /// TEE proof data handler API. pub api_url: Url, + /// Number of retries for retriable errors before giving up on recovery (i.e., returning an error + /// from [`Self::run()`]). 
+    pub max_retries: usize,
+    /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval
+    /// will be multiplied by [`Self.retry_backoff_multiplier`].
+    pub initial_retry_backoff: Duration,
+    /// Multiplier for the back-off interval when retrying recovery on a retriable error.
+    pub retry_backoff_multiplier: f32,
+    /// Maximum back-off interval when retrying recovery on a retriable error.
+    pub max_backoff: Duration,
 }

 impl FromEnv for TeeProverConfig {
     ///
     /// Example usage of environment variables for tests:
     /// ```
-    /// export TEE_SIGNING_KEY="b50b38c8d396c88728fc032ece558ebda96907a0b1a9340289715eef7bf29deb"
-    /// export TEE_QUOTE_FILE="/tmp/test" # run `echo test > /tmp/test` beforehand
-    /// export TEE_TYPE="sgx"
-    /// export TEE_API_URL="http://127.0.0.1:3320"
+    /// export TEE_PROVER_SIGNING_KEY="b50b38c8d396c88728fc032ece558ebda96907a0b1a9340289715eef7bf29deb"
+    /// export TEE_PROVER_ATTESTATION_QUOTE_FILE_PATH="/tmp/test" # run `echo test > /tmp/test` beforehand
+    /// export TEE_PROVER_TEE_TYPE="sgx"
+    /// export TEE_PROVER_API_URL="http://127.0.0.1:3320"
+    /// export TEE_PROVER_MAX_RETRIES=10
+    /// export TEE_PROVER_INITIAL_RETRY_BACKOFF=1
+    /// export TEE_PROVER_RETRY_BACKOFF_MULTIPLIER=2.0
+    /// export TEE_PROVER_MAX_BACKOFF=128
     /// ```
     fn from_env() -> anyhow::Result<Self> {
-        Ok(Self {
-            signing_key: std::env::var("TEE_SIGNING_KEY")?.parse()?,
-            attestation_quote_file_path: std::env::var("TEE_QUOTE_FILE")?.parse()?,
-            tee_type: std::env::var("TEE_TYPE")?.parse()?,
-            api_url: std::env::var("TEE_API_URL")?.parse()?,
-        })
+        let config: Self = envy::prefixed("TEE_PROVER_").from_env()?;
+        Ok(config)
     }
 }
diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs
index 41f3be2ea052..70c6f888185a 100644
--- a/core/bin/zksync_tee_prover/src/main.rs
+++ b/core/bin/zksync_tee_prover/src/main.rs
@@ -32,8 +32,6 @@ fn main() -> anyhow::Result<()> {
         ObservabilityConfig::from_env().context("ObservabilityConfig::from_env()")?;

     let tee_prover_config = TeeProverConfig::from_env()?;
-    let attestation_quote_bytes = std::fs::read(tee_prover_config.attestation_quote_file_path)?;
-
     let prometheus_config = PrometheusConfig::from_env()?;

     let mut builder = ZkStackServiceBuilder::new()?;
@@ -45,12 +43,7 @@ fn main() -> anyhow::Result<()> {

     builder
         .add_layer(SigintHandlerLayer)
-        .add_layer(TeeProverLayer::new(
-            tee_prover_config.api_url,
-            tee_prover_config.signing_key,
-            attestation_quote_bytes,
-            tee_prover_config.tee_type,
-        ));
+        .add_layer(TeeProverLayer::new(tee_prover_config));

     if let Some(gateway) = prometheus_config.gateway_endpoint() {
         let exporter_config =
diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs
index 7f874533b4b3..3d227118e57f 100644
--- a/core/bin/zksync_tee_prover/src/tee_prover.rs
+++ b/core/bin/zksync_tee_prover/src/tee_prover.rs
@@ -1,7 +1,6 @@
-use std::{fmt, time::Duration};
+use std::fmt;

-use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1, SecretKey};
-use url::Url;
+use secp256k1::{ecdsa::Signature, Message, PublicKey, Secp256k1};
 use zksync_basic_types::H256;
 use zksync_node_framework::{
     service::StopReceiver,
@@ -11,32 +10,21 @@ use zksync_node_framework::{
 };
 use zksync_prover_interface::inputs::TeeVerifierInput;
 use zksync_tee_verifier::Verify;
-use zksync_types::{tee_types::TeeType, L1BatchNumber};
+use zksync_types::L1BatchNumber;

-use crate::{api_client::TeeApiClient, 
error::TeeProverError, metrics::METRICS};
+use crate::{
+    api_client::TeeApiClient, config::TeeProverConfig, error::TeeProverError, metrics::METRICS,
+};

 /// Wiring layer for `TeeProver`
 #[derive(Debug)]
 pub(crate) struct TeeProverLayer {
-    api_url: Url,
-    signing_key: SecretKey,
-    attestation_quote_bytes: Vec<u8>,
-    tee_type: TeeType,
+    config: TeeProverConfig,
 }

 impl TeeProverLayer {
-    pub fn new(
-        api_url: Url,
-        signing_key: SecretKey,
-        attestation_quote_bytes: Vec<u8>,
-        tee_type: TeeType,
-    ) -> Self {
-        Self {
-            api_url,
-            signing_key,
-            attestation_quote_bytes,
-            tee_type,
-        }
+    pub fn new(config: TeeProverConfig) -> Self {
+        Self { config }
     }
 }

@@ -56,13 +44,10 @@ impl WiringLayer for TeeProverLayer {
     }

     async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> {
+        let api_url = self.config.api_url.clone();
         let tee_prover = TeeProver {
-            config: Default::default(),
-            signing_key: self.signing_key,
-            public_key: self.signing_key.public_key(&Secp256k1::new()),
-            attestation_quote_bytes: self.attestation_quote_bytes,
-            tee_type: self.tee_type,
-            api_client: TeeApiClient::new(self.api_url),
+            config: self.config,
+            api_client: TeeApiClient::new(api_url),
         };
         Ok(LayerOutput { tee_prover })
     }
@@ -70,10 +55,6 @@ impl WiringLayer for TeeProverLayer {

 pub(crate) struct TeeProver {
     config: TeeProverConfig,
-    signing_key: SecretKey,
-    public_key: PublicKey,
-    attestation_quote_bytes: Vec<u8>,
-    tee_type: TeeType,
     api_client: TeeApiClient,
 }

@@ -81,9 +62,6 @@ impl fmt::Debug for TeeProver {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         f.debug_struct("TeeProver")
             .field("config", &self.config)
-            .field("public_key", &self.public_key)
-            .field("attestation_quote_bytes", &self.attestation_quote_bytes)
-            .field("tee_type", &self.tee_type)
             .finish()
     }
 }
@@ -101,7 +79,7 @@ impl TeeProver {
         let batch_number = verification_result.batch_number;
         let msg_to_sign = Message::from_slice(root_hash_bytes)
             .map_err(|e| TeeProverError::Verification(e.into()))?;
-        let signature = self.signing_key.sign_ecdsa(msg_to_sign);
+        let signature = self.config.signing_key.sign_ecdsa(msg_to_sign);
         observer.observe();
         Ok((signature, batch_number, verification_result.value_hash))
     }
@@ -111,17 +89,17 @@ impl TeeProver {
         }
     }

-    async fn step(&self) -> Result<Option<L1BatchNumber>, TeeProverError> {
-        match self.api_client.get_job(self.tee_type).await? {
+    async fn step(&self, public_key: &PublicKey) -> Result<Option<L1BatchNumber>, TeeProverError> {
+        match self.api_client.get_job(self.config.tee_type).await? {
             Some(job) => {
                 let (signature, batch_number, root_hash) = self.verify(*job)?;
                 self.api_client
                     .submit_proof(
                         batch_number,
                         signature,
-                        &self.public_key,
+                        public_key,
                         root_hash,
-                        self.tee_type,
+                        self.config.tee_type,
                     )
                     .await?;
                 Ok(Some(batch_number))
             }
@@ -134,30 +112,6 @@ impl TeeProver {
     }
 }

-/// TEE prover configuration options.
-#[derive(Debug, Clone)]
-pub struct TeeProverConfig {
-    /// Number of retries for retriable errors before giving up on recovery (i.e., returning an error
-    /// from [`Self::run()`]).
-    pub max_retries: usize,
-    /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval
-    /// will be multiplied by [`Self.retry_backoff_multiplier`]. 
-    pub initial_retry_backoff: Duration,
-    pub retry_backoff_multiplier: f32,
-    pub max_backoff: Duration,
-}
-
-impl Default for TeeProverConfig {
-    fn default() -> Self {
-        Self {
-            max_retries: 5,
-            initial_retry_backoff: Duration::from_secs(1),
-            retry_backoff_multiplier: 2.0,
-            max_backoff: Duration::from_secs(128),
-        }
-    }
-}
-
 #[async_trait::async_trait]
 impl Task for TeeProver {
     fn id(&self) -> TaskId {
@@ -167,12 +121,15 @@ impl Task for TeeProver {

     async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
         tracing::info!("Starting the task {}", self.id());
+        let config = &self.config;
+        let attestation_quote_bytes = std::fs::read(&config.attestation_quote_file_path)?;
+        let public_key = config.signing_key.public_key(&Secp256k1::new());
         self.api_client
-            .register_attestation(self.attestation_quote_bytes.clone(), &self.public_key)
+            .register_attestation(attestation_quote_bytes, &public_key)
             .await?;

         let mut retries = 1;
-        let mut backoff = self.config.initial_retry_backoff;
+        let mut backoff = config.initial_retry_backoff;
         let mut observer = METRICS.job_waiting_time.start();

         loop {
@@ -180,11 +137,11 @@ impl Task for TeeProver {
                 tracing::info!("Stop signal received, shutting down TEE Prover component");
                 return Ok(());
             }
-            let result = self.step().await;
+            let result = self.step(&public_key).await;
             let need_to_sleep = match result {
                 Ok(batch_number) => {
                     retries = 1;
-                    backoff = self.config.initial_retry_backoff;
+                    backoff = config.initial_retry_backoff;
                     if let Some(batch_number) = batch_number {
                         observer.observe();
                         observer = METRICS.job_waiting_time.start();
@@ -198,14 +155,14 @@ impl Task for TeeProver {
                 }
                 Err(err) => {
                     METRICS.network_errors_counter.inc_by(1);
-                    if !err.is_retriable() || retries > self.config.max_retries {
+                    if !err.is_retriable() || retries > config.max_retries {
                         return Err(err.into());
                     }
-                    tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", self.config.max_retries, backoff.as_millis());
+                    tracing::warn!(%err, "Failed TEE prover step function {retries}/{}, retrying in {} milliseconds.", config.max_retries, backoff.as_millis());
                     retries += 1;
                     backoff = std::cmp::min(
-                        backoff.mul_f32(self.config.retry_backoff_multiplier),
-                        self.config.max_backoff,
+                        backoff.mul_f32(config.retry_backoff_multiplier),
+                        config.max_backoff,
                     );
                     true
                 }
diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix
index 303c91b137cb..a4128e008693 100644
--- a/etc/nix/container-tee_prover.nix
+++ b/etc/nix/container-tee_prover.nix
@@ -28,7 +28,11 @@ nixsgxLib.mkSGXContainer {
       log_level = "error";

       env = {
-        TEE_API_URL.passthrough = true;
+        TEE_PROVER_API_URL.passthrough = true;
+        TEE_PROVER_MAX_RETRIES.passthrough = true;
+        TEE_PROVER_INITIAL_RETRY_BACKOFF_SECONDS.passthrough = true;
+        TEE_PROVER_RETRY_BACKOFF_MULTIPLIER.passthrough = true;
+        TEE_PROVER_MAX_BACKOFF_SECONDS.passthrough = true;
         API_PROMETHEUS_LISTENER_PORT.passthrough = true;
         API_PROMETHEUS_PUSHGATEWAY_URL.passthrough = true;
         API_PROMETHEUS_PUSH_INTERVAL_MS.passthrough = true;

From 1e768d402012f6c7ce83fdd46c55f830ec31416a Mon Sep 17 00:00:00 2001
From: Danil 
Date: Mon, 2 Sep 2024 19:53:15 +0200
Subject: [PATCH 005/116] fix(config): Do not panic for observability config (#2639)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

## Why ❔

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

Signed-off-by: Danil 
---
 core/lib/protobuf_config/src/observability.rs | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/core/lib/protobuf_config/src/observability.rs b/core/lib/protobuf_config/src/observability.rs
index dcf87771b587..9a6c31f9223c 100644
--- a/core/lib/protobuf_config/src/observability.rs
+++ b/core/lib/protobuf_config/src/observability.rs
@@ -30,11 +30,7 @@ impl ProtoRepr for proto::Observability {
             sentry_url,
             sentry_environment,
             log_format: required(&self.log_format).context("log_format")?.clone(),
-            opentelemetry: self
-                .opentelemetry
-                .as_ref()
-                .map(|cfg| cfg.read().context("opentelemetry"))
-                .transpose()?,
+            opentelemetry: self.opentelemetry.as_ref().and_then(|cfg| cfg.read().ok()),
             log_directives: self.log_directives.clone(),
         })
     }

From 62e4d4619dde9d6bd9102f1410eea75b0e2051c5 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov 
Date: Tue, 3 Sep 2024 10:39:17 +0400
Subject: [PATCH 006/116] feat: Move prover data to prover/data (#2778)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

⚠️ this change is non-destructive globally (e.g. no changes in deployments/configs are needed), but for local deployments, if you have setup keys locally, you need to move them to the new folder:

```
cd $ZKSYNC_HOME
mv prover/crates/bin/vk_setup_data_generator_server_fri/data/setup* prover/data/keys
rmdir prover/crates/bin/vk_setup_data_generator_server_fri/data
```

## What

- Moves prover data (`data`/`historical_data`) from `prover/crates/bin/vk_setup_data_generator_server_fri` to `prover/data`
- Updates all the relevant paths
- Adds some minimal documentation to `prover/data/README.md`

## Why

- More intuitive
- Simplifies refactoring of the workspace: no need to worry about layout of data when renaming crates/changing structure/etc.
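For illustration, the practical effect is just a different base directory for key resolution — roughly like this hypothetical helper (not the actual `keystore.rs` change):

```rust
use std::path::{Path, PathBuf};

// Hypothetical sketch: setup keys and commitments now live under
// `prover/data` at the workspace root instead of inside the
// `vk_setup_data_generator_server_fri` crate.
fn prover_keys_dir(workspace_root: &Path) -> PathBuf {
    workspace_root.join("prover").join("data").join("keys")
}

fn main() {
    let dir = prover_keys_dir(Path::new("/path/to/zksync-era"));
    assert_eq!(dir, Path::new("/path/to/zksync-era/prover/data/keys"));
}
```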
--- .dockerignore | 2 +- .gitignore | 1 + core/lib/env_config/src/fri_prover.rs | 4 +-- docker/proof-fri-gpu-compressor/Dockerfile | 2 +- docker/prover-fri-gateway/Dockerfile | 2 +- docker/prover-gpu-fri-gar/Dockerfile | 2 +- docker/prover-gpu-fri/Dockerfile | 2 +- docker/witness-generator/Dockerfile | 2 +- docker/witness-vector-generator/Dockerfile | 2 +- docs/guides/advanced/15_prover_keys.md | 4 +-- etc/env/base/fri_prover.toml | 2 +- etc/env/file_based/general.yaml | 2 +- infrastructure/zk/src/prover_setup.ts | 8 +++--- .../src/keystore.rs | 6 ++--- .../src/utils.rs | 2 +- prover/data/README.md | 23 ++++++++++++++++++ .../historical_data/0.24.0/commitments.json | 0 .../snark_verification_scheduler_key.json | 0 .../historical_data/0.24.1}/commitments.json | 0 .../snark_verification_scheduler_key.json | 0 .../historical_data/18/commitments.json | 0 .../18/snark_verification_scheduler_key.json | 0 .../historical_data/19/commitments.json | 0 .../19/snark_verification_scheduler_key.json | 0 .../historical_data/20/commitments.json | 0 .../20/snark_verification_scheduler_key.json | 0 .../historical_data/21/commitments.json | 0 .../21/snark_verification_scheduler_key.json | 0 .../historical_data/22/commitments.json | 0 .../22/snark_verification_scheduler_key.json | 0 .../historical_data/23/commitments.json | 0 .../23/snark_verification_scheduler_key.json | 0 .../historical_data/README.md | 0 .../0.24.1 => data/keys}/commitments.json | 0 .../keys}/finalization_hints_basic_1.bin | Bin .../keys}/finalization_hints_basic_10.bin | Bin .../keys}/finalization_hints_basic_11.bin | Bin .../keys}/finalization_hints_basic_12.bin | Bin .../keys}/finalization_hints_basic_13.bin | Bin .../keys}/finalization_hints_basic_14.bin | Bin .../keys}/finalization_hints_basic_15.bin | Bin .../keys}/finalization_hints_basic_2.bin | Bin .../keys}/finalization_hints_basic_255.bin | Bin .../keys}/finalization_hints_basic_3.bin | Bin .../keys}/finalization_hints_basic_4.bin | Bin .../keys}/finalization_hints_basic_5.bin | Bin .../keys}/finalization_hints_basic_6.bin | Bin .../keys}/finalization_hints_basic_7.bin | Bin .../keys}/finalization_hints_basic_8.bin | Bin .../keys}/finalization_hints_basic_9.bin | Bin .../keys}/finalization_hints_leaf_10.bin | Bin .../keys}/finalization_hints_leaf_11.bin | Bin .../keys}/finalization_hints_leaf_12.bin | Bin .../keys}/finalization_hints_leaf_13.bin | Bin .../keys}/finalization_hints_leaf_14.bin | Bin .../keys}/finalization_hints_leaf_15.bin | Bin .../keys}/finalization_hints_leaf_16.bin | Bin .../keys}/finalization_hints_leaf_17.bin | Bin .../keys}/finalization_hints_leaf_18.bin | Bin .../keys}/finalization_hints_leaf_3.bin | Bin .../keys}/finalization_hints_leaf_4.bin | Bin .../keys}/finalization_hints_leaf_5.bin | Bin .../keys}/finalization_hints_leaf_6.bin | Bin .../keys}/finalization_hints_leaf_7.bin | Bin .../keys}/finalization_hints_leaf_8.bin | Bin .../keys}/finalization_hints_leaf_9.bin | Bin .../keys}/finalization_hints_node.bin | Bin .../finalization_hints_recursion_tip.bin | Bin .../keys}/finalization_hints_scheduler.bin | Bin .../snark_verification_scheduler_key.json | 0 .../keys}/verification_basic_10_key.json | 0 .../keys}/verification_basic_11_key.json | 0 .../keys}/verification_basic_12_key.json | 0 .../keys}/verification_basic_13_key.json | 0 .../keys}/verification_basic_14_key.json | 0 .../keys}/verification_basic_15_key.json | 0 .../keys}/verification_basic_1_key.json | 0 .../keys}/verification_basic_255_key.json | 0 .../keys}/verification_basic_2_key.json | 0 
.../keys}/verification_basic_3_key.json | 0 .../keys}/verification_basic_4_key.json | 0 .../keys}/verification_basic_5_key.json | 0 .../keys}/verification_basic_6_key.json | 0 .../keys}/verification_basic_7_key.json | 0 .../keys}/verification_basic_8_key.json | 0 .../keys}/verification_basic_9_key.json | 0 .../keys}/verification_leaf_10_key.json | 0 .../keys}/verification_leaf_11_key.json | 0 .../keys}/verification_leaf_12_key.json | 0 .../keys}/verification_leaf_13_key.json | 0 .../keys}/verification_leaf_14_key.json | 0 .../keys}/verification_leaf_15_key.json | 0 .../keys}/verification_leaf_16_key.json | 0 .../keys}/verification_leaf_17_key.json | 0 .../keys}/verification_leaf_18_key.json | 0 .../keys}/verification_leaf_3_key.json | 0 .../keys}/verification_leaf_4_key.json | 0 .../keys}/verification_leaf_5_key.json | 0 .../keys}/verification_leaf_6_key.json | 0 .../keys}/verification_leaf_7_key.json | 0 .../keys}/verification_leaf_8_key.json | 0 .../keys}/verification_leaf_9_key.json | 0 .../keys}/verification_node_key.json | 0 .../keys}/verification_recursion_tip_key.json | 0 .../keys}/verification_scheduler_key.json | 0 .../src/commands/prover/generate_sk.rs | 4 +-- .../src/commands/prover_version.rs | 3 +-- 107 files changed, 46 insertions(+), 27 deletions(-) create mode 100644 prover/data/README.md rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/0.24.0/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/0.24.0/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/historical_data/0.24.1}/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/historical_data/0.24.1}/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/18/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/18/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/19/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/19/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/20/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/20/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/21/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/21/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/22/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/22/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/23/commitments.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/23/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri => data}/historical_data/README.md (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1 => data/keys}/commitments.json (100%) 
rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_1.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_10.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_11.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_12.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_13.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_14.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_15.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_2.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_255.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_3.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_4.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_5.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_6.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_7.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_8.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_basic_9.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_10.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_11.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_12.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_13.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_14.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_15.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_16.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_17.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_18.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_3.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_4.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_5.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_6.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_7.bin (100%) rename 
prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_8.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_leaf_9.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_node.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_recursion_tip.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/finalization_hints_scheduler.bin (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1 => data/keys}/snark_verification_scheduler_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_10_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_11_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_12_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_13_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_14_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_15_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_1_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_255_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_2_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_3_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_4_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_5_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_6_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_7_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_8_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_basic_9_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_10_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_11_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_12_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_13_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_14_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_15_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_16_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_17_key.json (100%) rename 
prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_18_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_3_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_4_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_5_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_6_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_7_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_8_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_leaf_9_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_node_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_recursion_tip_key.json (100%) rename prover/{crates/bin/vk_setup_data_generator_server_fri/data => data/keys}/verification_scheduler_key.json (100%) diff --git a/.dockerignore b/.dockerignore index c32286be6a01..39efdabca19a 100644 --- a/.dockerignore +++ b/.dockerignore @@ -39,7 +39,7 @@ contracts/.git !etc/multivm_bootloaders !cargo !bellman-cuda -!prover/crates/bin/vk_setup_data_generator_server_fri/data/ +!prover/data/ !.github/release-please/manifest.json !etc/env/file_based diff --git a/.gitignore b/.gitignore index 66d7d00b263a..725b5940afeb 100644 --- a/.gitignore +++ b/.gitignore @@ -110,6 +110,7 @@ hyperchain-*.yml # Prover keys that should not be commited prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_* +prover/data/keys/setup_* # Zk Toolbox chains/era/configs/* diff --git a/core/lib/env_config/src/fri_prover.rs b/core/lib/env_config/src/fri_prover.rs index 33698221dc92..6eb199c7e438 100644 --- a/core/lib/env_config/src/fri_prover.rs +++ b/core/lib/env_config/src/fri_prover.rs @@ -32,7 +32,7 @@ mod tests { fn expected_config() -> FriProverConfig { FriProverConfig { - setup_data_path: "vk_setup_data_generator_server_fri/data".to_string(), + setup_data_path: "prover/data/keys".to_string(), prometheus_port: 3315, max_attempts: 10, generation_timeout_in_secs: 300, @@ -68,7 +68,7 @@ mod tests { fn from_env() { let mut lock = MUTEX.lock(); let config = r#" - FRI_PROVER_SETUP_DATA_PATH="vk_setup_data_generator_server_fri/data" + FRI_PROVER_SETUP_DATA_PATH="prover/data/keys" FRI_PROVER_PROMETHEUS_PORT="3315" FRI_PROVER_MAX_ATTEMPTS="10" FRI_PROVER_GENERATION_TIMEOUT_IN_SECS="300" diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 02ca4a3b77b0..45f2ffa51b04 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -37,7 +37,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys COPY setup_2\^24.key /setup_2\^24.key diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index c53f27818687..de59451fee8f 100644 --- 
a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -11,7 +11,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy VK required for proof wrapping -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri_gateway /usr/bin/ diff --git a/docker/prover-gpu-fri-gar/Dockerfile b/docker/prover-gpu-fri-gar/Dockerfile index 248f6aaf35fe..06a1ff532b57 100644 --- a/docker/prover-gpu-fri-gar/Dockerfile +++ b/docker/prover-gpu-fri-gar/Dockerfile @@ -9,7 +9,7 @@ COPY *.bin / RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY --from=prover prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY --from=prover prover/data/keys/ /prover/data/keys/ COPY --from=prover /usr/bin/zksync_prover_fri /usr/bin/ ENTRYPOINT ["zksync_prover_fri"] diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index 1f1aaa447f22..ad3ff1ff7197 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -31,7 +31,7 @@ FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for assembly generation -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_fri /usr/bin/ diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 4f7c00aa2ef9..2eebe07515e4 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -13,7 +13,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_generator /usr/bin/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index d1bc1e29c5fa..2f79395f1fd4 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -12,7 +12,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation -COPY prover/crates/bin/vk_setup_data_generator_server_fri/data/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_vector_generator /usr/bin/ diff --git a/docs/guides/advanced/15_prover_keys.md b/docs/guides/advanced/15_prover_keys.md index 9f562b49d299..5a3a264e8ddd 100644 --- a/docs/guides/advanced/15_prover_keys.md +++ b/docs/guides/advanced/15_prover_keys.md @@ -118,9 +118,9 @@ friendly hash function (currently Poseidon2). 
[recursive_circuit_list]: https://github.com/matter-labs/era-zkevm_test_harness/blob/3cd647aa57fc2e1180bab53f7a3b61ec47502a46/circuit_definitions/src/circuit_definitions/recursion_layer/mod.rs#L29 [verification_key_list]: - https://github.com/matter-labs/zksync-era/tree/boojum-integration/prover/vk_setup_data_generator_server_fri/data + https://github.com/matter-labs/zksync-era/tree/6d18061df4a18803d3c6377305ef711ce60317e1/prover/data/keys [env_variables_for_hash]: - https://github.com/matter-labs/zksync-era/blob/boojum-integration/etc/env/base/contracts.toml#L44 + https://github.com/matter-labs/zksync-era/blob/6d18061df4a18803d3c6377305ef711ce60317e1/etc/env/base/contracts.toml#L61 [prover_setup_data]: https://github.com/matter-labs/zksync-era/blob/d2ca29bf20b4ec2d9ec9e327b4ba6b281d9793de/prover/vk_setup_data_generator_server_fri/src/lib.rs#L61 [verifier_computation]: diff --git a/etc/env/base/fri_prover.toml b/etc/env/base/fri_prover.toml index 1c93752251bc..d09991312ae5 100644 --- a/etc/env/base/fri_prover.toml +++ b/etc/env/base/fri_prover.toml @@ -1,5 +1,5 @@ [fri_prover] -setup_data_path = "crates/bin/vk_setup_data_generator_server_fri/data" +setup_data_path = "data/keys" prometheus_port = 3315 max_attempts = 10 generation_timeout_in_secs = 600 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 19921cf536c4..064a3b447b9c 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -140,7 +140,7 @@ prover: file_backed: file_backed_base_path: artifacts max_retries: 10 - setup_data_path: crates/bin/vk_setup_data_generator_server_fri/data + setup_data_path: data/keys prometheus_port: 3315 max_attempts: 10 generation_timeout_in_secs: 600 diff --git a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts index 5a17c9683742..b5bd4c828aec 100644 --- a/infrastructure/zk/src/prover_setup.ts +++ b/infrastructure/zk/src/prover_setup.ts @@ -30,8 +30,7 @@ export async function setupProver(proverType: ProverType) { } else { env.modify( 'FRI_PROVER_SETUP_DATA_PATH', - `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${ - proverType === ProverType.GPU ? 'gpu' : 'cpu' + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${proverType === ProverType.GPU ? 'gpu' : 'cpu' }/`, process.env.ENV_FILE! ); @@ -98,8 +97,7 @@ async function setupProverKeys(proverType: ProverType) { env.modify( 'FRI_PROVER_SETUP_DATA_PATH', - `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${ - proverType === ProverType.GPU ? 'gpu' : 'cpu' + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${proverType === ProverType.GPU ? 'gpu' : 'cpu' }/`, process.env.ENV_FILE! 
); @@ -204,7 +202,7 @@ async function downloadDefaultSetupKeys(proverType: ProverType, region: string) ); await utils.spawn( - `cp -r ${process.env.ZKSYNC_HOME}/prover/vk_setup_data_generator_server_fri/data/* ${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` + `cp -r ${process.env.ZKSYNC_HOME}/prover/data/keys/* ${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${currentEnv}/${proverType}/` ); } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs index e886b5d1b0c0..9e3a6e8d918d 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs @@ -47,16 +47,14 @@ pub struct Keystore { fn get_base_path() -> PathBuf { let path = core_workspace_dir_or_current_dir(); - let new_path = path.join("prover/crates/bin/vk_setup_data_generator_server_fri/data"); + let new_path = path.join("data/keys"); if new_path.exists() { return new_path; } let mut components = path.components(); components.next_back().unwrap(); - components - .as_path() - .join("prover/crates/bin/vk_setup_data_generator_server_fri/data") + components.as_path().join("data/keys") } impl Default for Keystore { diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs index 1ac6c4f4230d..5387b73e76cd 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs @@ -132,7 +132,7 @@ mod tests { #[test] fn test_keyhash_generation() { let mut path_to_input = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap()); - path_to_input.push("historical_data"); + path_to_input.push("../../../data/historical_data"); for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() { if entry.metadata().unwrap().is_dir() { diff --git a/prover/data/README.md b/prover/data/README.md new file mode 100644 index 000000000000..8391aa33ba5c --- /dev/null +++ b/prover/data/README.md @@ -0,0 +1,23 @@
+# Prover data directory
+
+This directory contains the data required to run provers.
+
+Currently, it has the following sub-directories:
+
+- [keys](./keys/): Data required for proof generation. This data is mapped to a single protocol version.
+- [historical_data](./historical_data/): Descriptors for the protocol versions used in the past.
+
+## Keys directory
+
+The `keys` directory is used by various components in the prover subsystem, and it generally can contain two kinds of data:
+
+- Small static files, like commitments, finalization hints, or verification keys.
+- Big generated blobs, like setup keys.
+
+Small static files are committed to the repository. Big files are expected to be downloaded or generated on demand. Two
+important notes as of Sep 2024:
+
+- Path to setup keys can be overridden via configuration.
+- Proof compressor requires a universal setup file, named, for example, `setup_2^24.bin` or `setup_2^26.bin`. It's
+  handled separately from the rest of the keys, e.g. it has separate configuration variables, and can naturally occur in
+  `$ZKSYNC_HOME/keys/setup` during development.
diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json b/prover/data/historical_data/0.24.0/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/commitments.json rename to prover/data/historical_data/0.24.0/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json b/prover/data/historical_data/0.24.0/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.0/snark_verification_scheduler_key.json rename to prover/data/historical_data/0.24.0/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json b/prover/data/historical_data/0.24.1/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/commitments.json rename to prover/data/historical_data/0.24.1/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json b/prover/data/historical_data/0.24.1/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/snark_verification_scheduler_key.json rename to prover/data/historical_data/0.24.1/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json b/prover/data/historical_data/18/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/commitments.json rename to prover/data/historical_data/18/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json b/prover/data/historical_data/18/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/18/snark_verification_scheduler_key.json rename to prover/data/historical_data/18/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json b/prover/data/historical_data/19/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/commitments.json rename to prover/data/historical_data/19/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json b/prover/data/historical_data/19/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/19/snark_verification_scheduler_key.json rename to prover/data/historical_data/19/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json b/prover/data/historical_data/20/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/commitments.json rename to prover/data/historical_data/20/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json b/prover/data/historical_data/20/snark_verification_scheduler_key.json similarity index 100% rename from 
prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/20/snark_verification_scheduler_key.json rename to prover/data/historical_data/20/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json b/prover/data/historical_data/21/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/commitments.json rename to prover/data/historical_data/21/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json b/prover/data/historical_data/21/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/21/snark_verification_scheduler_key.json rename to prover/data/historical_data/21/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json b/prover/data/historical_data/22/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/commitments.json rename to prover/data/historical_data/22/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json b/prover/data/historical_data/22/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/22/snark_verification_scheduler_key.json rename to prover/data/historical_data/22/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json b/prover/data/historical_data/23/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/commitments.json rename to prover/data/historical_data/23/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json b/prover/data/historical_data/23/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/23/snark_verification_scheduler_key.json rename to prover/data/historical_data/23/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md b/prover/data/historical_data/README.md similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/README.md rename to prover/data/historical_data/README.md diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json b/prover/data/keys/commitments.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/commitments.json rename to prover/data/keys/commitments.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin b/prover/data/keys/finalization_hints_basic_1.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_1.bin rename to prover/data/keys/finalization_hints_basic_1.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin b/prover/data/keys/finalization_hints_basic_10.bin similarity index 100% 
rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_10.bin rename to prover/data/keys/finalization_hints_basic_10.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin b/prover/data/keys/finalization_hints_basic_11.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_11.bin rename to prover/data/keys/finalization_hints_basic_11.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin b/prover/data/keys/finalization_hints_basic_12.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_12.bin rename to prover/data/keys/finalization_hints_basic_12.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin b/prover/data/keys/finalization_hints_basic_13.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_13.bin rename to prover/data/keys/finalization_hints_basic_13.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin b/prover/data/keys/finalization_hints_basic_14.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_14.bin rename to prover/data/keys/finalization_hints_basic_14.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin b/prover/data/keys/finalization_hints_basic_15.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_15.bin rename to prover/data/keys/finalization_hints_basic_15.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin b/prover/data/keys/finalization_hints_basic_2.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_2.bin rename to prover/data/keys/finalization_hints_basic_2.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin b/prover/data/keys/finalization_hints_basic_255.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_255.bin rename to prover/data/keys/finalization_hints_basic_255.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin b/prover/data/keys/finalization_hints_basic_3.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_3.bin rename to prover/data/keys/finalization_hints_basic_3.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin b/prover/data/keys/finalization_hints_basic_4.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_4.bin rename to prover/data/keys/finalization_hints_basic_4.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin b/prover/data/keys/finalization_hints_basic_5.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_5.bin rename to prover/data/keys/finalization_hints_basic_5.bin diff --git 
a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin b/prover/data/keys/finalization_hints_basic_6.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_6.bin rename to prover/data/keys/finalization_hints_basic_6.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin b/prover/data/keys/finalization_hints_basic_7.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_7.bin rename to prover/data/keys/finalization_hints_basic_7.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin b/prover/data/keys/finalization_hints_basic_8.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_8.bin rename to prover/data/keys/finalization_hints_basic_8.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin b/prover/data/keys/finalization_hints_basic_9.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_basic_9.bin rename to prover/data/keys/finalization_hints_basic_9.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin b/prover/data/keys/finalization_hints_leaf_10.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_10.bin rename to prover/data/keys/finalization_hints_leaf_10.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin b/prover/data/keys/finalization_hints_leaf_11.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_11.bin rename to prover/data/keys/finalization_hints_leaf_11.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin b/prover/data/keys/finalization_hints_leaf_12.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_12.bin rename to prover/data/keys/finalization_hints_leaf_12.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin b/prover/data/keys/finalization_hints_leaf_13.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_13.bin rename to prover/data/keys/finalization_hints_leaf_13.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin b/prover/data/keys/finalization_hints_leaf_14.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_14.bin rename to prover/data/keys/finalization_hints_leaf_14.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin b/prover/data/keys/finalization_hints_leaf_15.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_15.bin rename to prover/data/keys/finalization_hints_leaf_15.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin b/prover/data/keys/finalization_hints_leaf_16.bin similarity index 100% rename from 
prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_16.bin rename to prover/data/keys/finalization_hints_leaf_16.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin b/prover/data/keys/finalization_hints_leaf_17.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_17.bin rename to prover/data/keys/finalization_hints_leaf_17.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin b/prover/data/keys/finalization_hints_leaf_18.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_18.bin rename to prover/data/keys/finalization_hints_leaf_18.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin b/prover/data/keys/finalization_hints_leaf_3.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_3.bin rename to prover/data/keys/finalization_hints_leaf_3.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin b/prover/data/keys/finalization_hints_leaf_4.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_4.bin rename to prover/data/keys/finalization_hints_leaf_4.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin b/prover/data/keys/finalization_hints_leaf_5.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_5.bin rename to prover/data/keys/finalization_hints_leaf_5.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin b/prover/data/keys/finalization_hints_leaf_6.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_6.bin rename to prover/data/keys/finalization_hints_leaf_6.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin b/prover/data/keys/finalization_hints_leaf_7.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_7.bin rename to prover/data/keys/finalization_hints_leaf_7.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin b/prover/data/keys/finalization_hints_leaf_8.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_8.bin rename to prover/data/keys/finalization_hints_leaf_8.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin b/prover/data/keys/finalization_hints_leaf_9.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_leaf_9.bin rename to prover/data/keys/finalization_hints_leaf_9.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin b/prover/data/keys/finalization_hints_node.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_node.bin rename to prover/data/keys/finalization_hints_node.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin 
b/prover/data/keys/finalization_hints_recursion_tip.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_recursion_tip.bin rename to prover/data/keys/finalization_hints_recursion_tip.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin b/prover/data/keys/finalization_hints_scheduler.bin similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/finalization_hints_scheduler.bin rename to prover/data/keys/finalization_hints_scheduler.bin diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json b/prover/data/keys/snark_verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/historical_data/0.24.1/snark_verification_scheduler_key.json rename to prover/data/keys/snark_verification_scheduler_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json b/prover/data/keys/verification_basic_10_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_10_key.json rename to prover/data/keys/verification_basic_10_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json b/prover/data/keys/verification_basic_11_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_11_key.json rename to prover/data/keys/verification_basic_11_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json b/prover/data/keys/verification_basic_12_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_12_key.json rename to prover/data/keys/verification_basic_12_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json b/prover/data/keys/verification_basic_13_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_13_key.json rename to prover/data/keys/verification_basic_13_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json b/prover/data/keys/verification_basic_14_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_14_key.json rename to prover/data/keys/verification_basic_14_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json b/prover/data/keys/verification_basic_15_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_15_key.json rename to prover/data/keys/verification_basic_15_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json b/prover/data/keys/verification_basic_1_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_1_key.json rename to prover/data/keys/verification_basic_1_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json b/prover/data/keys/verification_basic_255_key.json similarity index 100% rename from 
prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_255_key.json rename to prover/data/keys/verification_basic_255_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json b/prover/data/keys/verification_basic_2_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_2_key.json rename to prover/data/keys/verification_basic_2_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json b/prover/data/keys/verification_basic_3_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_3_key.json rename to prover/data/keys/verification_basic_3_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json b/prover/data/keys/verification_basic_4_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_4_key.json rename to prover/data/keys/verification_basic_4_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json b/prover/data/keys/verification_basic_5_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_5_key.json rename to prover/data/keys/verification_basic_5_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json b/prover/data/keys/verification_basic_6_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_6_key.json rename to prover/data/keys/verification_basic_6_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json b/prover/data/keys/verification_basic_7_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_7_key.json rename to prover/data/keys/verification_basic_7_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json b/prover/data/keys/verification_basic_8_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_8_key.json rename to prover/data/keys/verification_basic_8_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json b/prover/data/keys/verification_basic_9_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_basic_9_key.json rename to prover/data/keys/verification_basic_9_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json b/prover/data/keys/verification_leaf_10_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_10_key.json rename to prover/data/keys/verification_leaf_10_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json b/prover/data/keys/verification_leaf_11_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_11_key.json rename to prover/data/keys/verification_leaf_11_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json 
b/prover/data/keys/verification_leaf_12_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_12_key.json rename to prover/data/keys/verification_leaf_12_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json b/prover/data/keys/verification_leaf_13_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_13_key.json rename to prover/data/keys/verification_leaf_13_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json b/prover/data/keys/verification_leaf_14_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_14_key.json rename to prover/data/keys/verification_leaf_14_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json b/prover/data/keys/verification_leaf_15_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_15_key.json rename to prover/data/keys/verification_leaf_15_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json b/prover/data/keys/verification_leaf_16_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_16_key.json rename to prover/data/keys/verification_leaf_16_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json b/prover/data/keys/verification_leaf_17_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_17_key.json rename to prover/data/keys/verification_leaf_17_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json b/prover/data/keys/verification_leaf_18_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_18_key.json rename to prover/data/keys/verification_leaf_18_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json b/prover/data/keys/verification_leaf_3_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_3_key.json rename to prover/data/keys/verification_leaf_3_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json b/prover/data/keys/verification_leaf_4_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_4_key.json rename to prover/data/keys/verification_leaf_4_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json b/prover/data/keys/verification_leaf_5_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_5_key.json rename to prover/data/keys/verification_leaf_5_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json b/prover/data/keys/verification_leaf_6_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_6_key.json rename to prover/data/keys/verification_leaf_6_key.json diff --git 
a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json b/prover/data/keys/verification_leaf_7_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_7_key.json rename to prover/data/keys/verification_leaf_7_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json b/prover/data/keys/verification_leaf_8_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_8_key.json rename to prover/data/keys/verification_leaf_8_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json b/prover/data/keys/verification_leaf_9_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_leaf_9_key.json rename to prover/data/keys/verification_leaf_9_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json b/prover/data/keys/verification_node_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_node_key.json rename to prover/data/keys/verification_node_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json b/prover/data/keys/verification_recursion_tip_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_recursion_tip_key.json rename to prover/data/keys/verification_recursion_tip_key.json diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json b/prover/data/keys/verification_scheduler_key.json similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/data/verification_scheduler_key.json rename to prover/data/keys/verification_scheduler_key.json diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs index 7f678470d178..c13d1c3b5e03 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs @@ -18,8 +18,8 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { shell, "cargo run --features gpu --release --bin key_generator -- generate-sk-gpu all --recompute-if-missing - --setup-path=crates/bin/vk_setup_data_generator_server_fri/data - --path={link_to_prover}/crates/bin/vk_setup_data_generator_server_fri/data" + --setup-path=data/keys + --path={link_to_prover}/data/keys" )); cmd.run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs index 479f796294fa..8740e7c873a9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs @@ -27,8 +27,7 @@ async fn get_protocol_version(shell: &Shell, link_to_prover: &Path) -> anyhow::R } async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result { - let path = - link_to_prover.join("crates/bin/vk_setup_data_generator_server_fri/data/commitments.json"); + let path = link_to_prover.join("data/keys/commitments.json"); let file = fs::File::open(path).expect("Could not find commitments file in zksync-era"); let json: serde_json::Value = 
serde_json::from_reader(file).expect("Could not parse commitments.json");

From 0c023f898d09b2f98e4efeda71f6d639b868189f Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Tue, 3 Sep 2024 11:41:10 +0400
Subject: [PATCH 007/116] chore: Fix keys paths for wvg docker image (#2785)

Follow-up to #2784. Forgot to update one dockerfile.

--- docker/witness-vector-generator/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 2f79395f1fd4..e315f670101a 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -12,7 +12,7 @@ FROM debian:bookworm-slim RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* # copy finalization hints required for witness vector generation -COPY prover/data/keys/ /prover/crates/bin/vk_setup_data_generator_server_fri/data/ +COPY prover/data/keys/ /prover/data/keys/ COPY --from=builder /usr/src/zksync/prover/target/release/zksync_witness_vector_generator /usr/bin/

From e9d2a1e8a8d60fc2a2e6d66281f4c08ef4a8b08b Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Tue, 3 Sep 2024 15:32:28 +0400
Subject: [PATCH 008/116] chore: Fix path resolution when running from compiled binary (#2788)

Follow-up to #2785.

We use `zksync_utils::env::locate_workspace` in our code, which behaves differently depending on the environment (it uses `cargo locate-project` locally and falls back to `.` in Docker); the result is then altered via `..` in `core_workspace_dir_or_current_dir`, which makes the path resolution very hard to follow. I fix the issue so that it works both locally and in Docker, and also add comments explaining how it works.

--- .../src/keystore.rs | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs index 9e3a6e8d918d..c683ed3d2965 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs @@ -45,16 +45,28 @@ pub struct Keystore { } fn get_base_path() -> PathBuf { + // This will return the path to the _core_ workspace locally, + // otherwise (e.g. in Docker) it will return `.` (which is usually equivalent to `/`). + // + // Note: at the moment of writing this function, it locates the prover workspace, and uses + // `..` to get to the core workspace, so the path returned is something like: + // `/path/to/workspace/zksync-era/prover/..` (or `.` for binaries). let path = core_workspace_dir_or_current_dir(); - let new_path = path.join("data/keys"); + // Check if we're in the folder equivalent to the core workspace root. + // Path we're actually checking is: + // `/path/to/workspace/zksync-era/prover/../prover/data/keys` + let new_path = path.join("prover/data/keys"); if new_path.exists() { return new_path; } let mut components = path.components(); + // This removes the last component of `path`, so: + // for local workspace, we're removing `..` and putting ourselves back to the prover workspace. + // for binaries, we're removing `.` and getting the empty path.
     components.next_back().unwrap();
-    components.as_path().join("data/keys")
+    components.as_path().join("prover/data/keys")
 }

 impl Default for Keystore {

From 8773ee148e795595e59931995a471259d01ce29e Mon Sep 17 00:00:00 2001
From: Harald Hoyer
Date: Tue, 3 Sep 2024 14:52:33 +0200
Subject: [PATCH 009/116] feat(container-tee_prover): use `--env-prefix` for
 `tee-key-preexec` (#2789)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

With https://github.com/matter-labs/teepot/pull/196 merged, update the
`flake.lock` for `teepot` to use the `--env-prefix` argument for
`tee-key-preexec`.

## Why ❔

This aligns the environment variable names, which were changed in
https://github.com/matter-labs/zksync-era/pull/2764

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

Signed-off-by: Harald Hoyer
---
 etc/nix/container-tee_prover.nix |  3 +++
 flake.lock                       | 12 ++++++------
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix
index a4128e008693..7c0d8d164e34 100644
--- a/etc/nix/container-tee_prover.nix
+++ b/etc/nix/container-tee_prover.nix
@@ -22,6 +22,9 @@ nixsgxLib.mkSGXContainer {
   loader = {
     argv = [
       entrypoint
+      "--env-prefix"
+      "TEE_PROVER_"
+      "--"
       "${tee_prover}/bin/zksync_tee_prover"
     ];

diff --git a/flake.lock b/flake.lock
index e217d37664cd..e1905f2a1f65 100644
--- a/flake.lock
+++ b/flake.lock
@@ -360,11 +360,11 @@
       "snowfall-lib": "snowfall-lib_2"
     },
     "locked": {
-      "lastModified": 1719916365,
-      "narHash": "sha256-RzCFbGAHq6rTY4ctrmazGIx59qXtfrVfEnIe+L0leTo=",
+      "lastModified": 1723120465,
+      "narHash": "sha256-sWu5lKy71hHnSwydhwzG2XgSehjvLfK2iuUtNimvGkg=",
       "owner": "matter-labs",
       "repo": "nixsgx",
-      "rev": "0309a20ee5bf12b7390aa6795409b448420e80f2",
+      "rev": "b080c32f2aa8b3d4b4bc4356a8a513279b6f82ab",
       "type": "github"
     },
     "original": {
@@ -623,11 +623,11 @@
       "vault-auth-tee-flake": "vault-auth-tee-flake"
     },
     "locked": {
-      "lastModified": 1723034739,
-      "narHash": "sha256-bu4XvqwsPUzfMzk5t10wyHliItfH7FOk42V0CIwl4lg=",
+      "lastModified": 1725354393,
-      "narHash": "sha256-RSiDY3sr0hdlydO3cYtidjVx+OlqIsmcnvsSDSGQPF0=",
       "owner": "matter-labs",
       "repo": "teepot",
-      "rev": "4ed311a16a72521f79418216ad29e6eed8db347d",
+      "rev": "2c21d0161e43dc7a786787c89b84ecd6e8857106",
       "type": "github"
     },
     "original": {

From b82dfa4d29fce107223c3638fe490b5cb0f28d8c Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Tue, 3 Sep 2024 17:50:47 +0300
Subject: [PATCH 010/116] feat(vm): Extract batch executor to separate crate
 (#2702)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Extracts the batch executor from `zksync_state_keeper` into a separate crate,
`zksync_vm_executor`. Places the batch executor interfaces in `zksync_vm_interface`.
- Moves the remains of `zksync_vm_utils` into this crate, too.
- Removes the dependency on `zksync_state_keeper` in the VM runner.

## Why ❔

Improves encapsulation and versatility (e.g., when using custom VMs).

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
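As a rough usage sketch of the extracted interface (a minimal, illustrative example based
on the trait definitions added in this PR; `run_single_batch` is a hypothetical helper,
and storage setup, sealing logic and most error handling are elided):

```rust
use zksync_multivm::interface::{
    executor::BatchExecutorFactory, storage::ReadStorage, FinishedL1Batch, L1BatchEnv, SystemEnv,
};
use zksync_types::Transaction;
use zksync_vm_executor::batch::MainBatchExecutorFactory;

/// Drives a single L1 batch through the extracted executor interface.
async fn run_single_batch<S: ReadStorage + Send + 'static>(
    storage: S,
    l1_batch_env: L1BatchEnv,
    system_env: SystemEnv,
    txs: Vec<Transaction>,
) -> anyhow::Result<FinishedL1Batch> {
    let mut factory = MainBatchExecutorFactory::new(false, false);
    let mut executor = factory.init_batch(storage, l1_batch_env, system_env);
    for tx in txs {
        let res = executor.execute_tx(tx).await?;
        if res.was_halted() {
            // A halted transaction did not make it into the batch; roll it back.
            executor.rollback_last_tx().await?;
        }
    }
    let (finished_batch, _storage_view) = executor.finish_batch().await?;
    Ok(finished_batch)
}
```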
--- Cargo.lock | 41 ++-- Cargo.toml | 4 +- core/lib/{vm_utils => vm_executor}/Cargo.toml | 9 +- .../vm_executor/src/batch/executor.rs} | 159 ++++----------- .../vm_executor/src/batch/factory.rs} | 175 +++++++--------- core/lib/vm_executor/src/batch/metrics.rs | 95 +++++++++ core/lib/vm_executor/src/batch/mod.rs | 9 + core/lib/vm_executor/src/lib.rs | 9 + .../{vm_utils => vm_executor}/src/storage.rs | 4 +- core/lib/vm_interface/Cargo.toml | 2 + core/lib/vm_interface/src/executor.rs | 44 ++++ core/lib/vm_interface/src/lib.rs | 11 +- core/lib/vm_interface/src/storage/view.rs | 2 +- .../src/types/outputs/execution_result.rs | 15 ++ .../src/types/outputs/finished_l1batch.rs | 26 +++ .../lib/vm_interface/src/types/outputs/mod.rs | 5 +- core/lib/vm_utils/src/lib.rs | 1 - core/node/consensus/src/testonly.rs | 8 +- core/node/node_framework/Cargo.toml | 1 + .../state_keeper/main_batch_executor.rs | 8 +- .../layers/state_keeper/mod.rs | 27 ++- .../implementations/layers/vm_runner/bwip.rs | 6 +- .../implementations/resources/state_keeper.rs | 15 +- core/node/node_sync/Cargo.toml | 2 +- core/node/node_sync/src/external_io.rs | 2 +- core/node/node_sync/src/tests.rs | 6 +- core/node/state_keeper/Cargo.toml | 2 +- core/node/state_keeper/src/executor/mod.rs | 60 ++++++ .../{batch_executor => executor}/tests/mod.rs | 61 +++--- .../tests/read_storage_factory.rs | 0 .../tests/tester.rs | 36 ++-- core/node/state_keeper/src/io/common/tests.rs | 2 +- core/node/state_keeper/src/io/mempool.rs | 2 +- core/node/state_keeper/src/io/mod.rs | 2 +- core/node/state_keeper/src/io/persistence.rs | 5 +- core/node/state_keeper/src/keeper.rs | 126 +++++------- core/node/state_keeper/src/lib.rs | 54 +---- core/node/state_keeper/src/metrics.rs | 66 +----- core/node/state_keeper/src/testonly/mod.rs | 107 ++++------ .../src/testonly/test_batch_executor.rs | 193 +++++++++--------- core/node/state_keeper/src/tests/mod.rs | 56 +++-- .../tee_verifier_input_producer/Cargo.toml | 2 +- .../tee_verifier_input_producer/src/lib.rs | 2 +- core/node/vm_runner/Cargo.toml | 5 +- core/node/vm_runner/src/impls/bwip.rs | 90 ++++---- core/node/vm_runner/src/impls/playground.rs | 37 ++-- .../vm_runner/src/impls/protective_reads.rs | 55 +++-- core/node/vm_runner/src/lib.rs | 3 +- core/node/vm_runner/src/output_handler.rs | 157 ++++++++------ core/node/vm_runner/src/process.rs | 88 +++----- core/node/vm_runner/src/storage.rs | 4 +- core/node/vm_runner/src/tests/mod.rs | 21 +- .../vm_runner/src/tests/output_handler.rs | 80 ++++---- core/node/vm_runner/src/tests/process.rs | 4 +- .../vm_runner/src/tests/storage_writer.rs | 78 +++---- prover/Cargo.lock | 2 + 56 files changed, 1051 insertions(+), 1035 deletions(-) rename core/lib/{vm_utils => vm_executor}/Cargo.toml (67%) rename core/{node/state_keeper/src/batch_executor/mod.rs => lib/vm_executor/src/batch/executor.rs} (53%) rename core/{node/state_keeper/src/batch_executor/main_executor.rs => lib/vm_executor/src/batch/factory.rs} (68%) create mode 100644 core/lib/vm_executor/src/batch/metrics.rs create mode 100644 core/lib/vm_executor/src/batch/mod.rs create mode 100644 core/lib/vm_executor/src/lib.rs rename core/lib/{vm_utils => vm_executor}/src/storage.rs (98%) create mode 100644 core/lib/vm_interface/src/executor.rs delete mode 100644 core/lib/vm_utils/src/lib.rs create mode 100644 core/node/state_keeper/src/executor/mod.rs rename core/node/state_keeper/src/{batch_executor => executor}/tests/mod.rs (92%) rename core/node/state_keeper/src/{batch_executor => executor}/tests/read_storage_factory.rs 
(100%) rename core/node/state_keeper/src/{batch_executor => executor}/tests/tester.rs (95%) diff --git a/Cargo.lock b/Cargo.lock index f1dc1a5d3a37..e57c437d4bf1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9177,6 +9177,7 @@ dependencies = [ "zksync_types", "zksync_utils", "zksync_vlog", + "zksync_vm_executor", "zksync_vm_runner", "zksync_web3_decl", ] @@ -9263,7 +9264,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", "zksync_web3_decl", ] @@ -9590,7 +9591,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", ] [[package]] @@ -9677,7 +9678,7 @@ dependencies = [ "zksync_tee_verifier", "zksync_types", "zksync_utils", - "zksync_vm_utils", + "zksync_vm_executor", ] [[package]] @@ -9779,11 +9780,29 @@ dependencies = [ "vise-exporter", ] +[[package]] +name = "zksync_vm_executor" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "once_cell", + "tokio", + "tracing", + "vise", + "zksync_contracts", + "zksync_dal", + "zksync_multivm", + "zksync_types", +] + [[package]] name = "zksync_vm_interface" version = "0.1.0" dependencies = [ + "anyhow", "assert_matches", + "async-trait", "hex", "serde", "serde_json", @@ -9815,30 +9834,16 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_health_check", - "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", "zksync_object_store", "zksync_prover_interface", "zksync_state", - "zksync_state_keeper", "zksync_storage", "zksync_test_account", "zksync_types", "zksync_utils", - "zksync_vm_utils", -] - -[[package]] -name = "zksync_vm_utils" -version = "0.1.0" -dependencies = [ - "anyhow", - "tokio", - "tracing", - "zksync_contracts", - "zksync_dal", - "zksync_types", + "zksync_vm_executor", "zksync_vm_interface", ] diff --git a/Cargo.toml b/Cargo.toml index 6faea57fa1a0..334c85870f27 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -70,7 +70,7 @@ members = [ "core/lib/vlog", "core/lib/multivm", "core/lib/vm_interface", - "core/lib/vm_utils", + "core/lib/vm_executor", "core/lib/web3_decl", "core/lib/snapshots_applier", "core/lib/crypto_primitives", @@ -236,7 +236,7 @@ zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } zksync_vm_interface = { version = "0.1.0", path = "core/lib/vm_interface" } -zksync_vm_utils = { version = "0.1.0", path = "core/lib/vm_utils" } +zksync_vm_executor = { version = "0.1.0", path = "core/lib/vm_executor" } zksync_basic_types = { version = "0.1.0", path = "core/lib/basic_types" } zksync_circuit_breaker = { version = "0.1.0", path = "core/lib/circuit_breaker" } zksync_config = { version = "0.1.0", path = "core/lib/config" } diff --git a/core/lib/vm_utils/Cargo.toml b/core/lib/vm_executor/Cargo.toml similarity index 67% rename from core/lib/vm_utils/Cargo.toml rename to core/lib/vm_executor/Cargo.toml index cb12e7c8f673..9471e263bf43 100644 --- a/core/lib/vm_utils/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "zksync_vm_utils" -description = "ZKsync VM utilities" +name = "zksync_vm_executor" +description = "Implementations of ZKsync VM executors" version.workspace = true edition.workspace = true authors.workspace = true @@ -14,8 +14,11 @@ categories.workspace = true zksync_contracts.workspace = true zksync_dal.workspace = true zksync_types.workspace = true 
-zksync_vm_interface.workspace = true +zksync_multivm.workspace = true +async-trait.workspace = true +once_cell.workspace = true tokio.workspace = true anyhow.workspace = true tracing.workspace = true +vise.workspace = true diff --git a/core/node/state_keeper/src/batch_executor/mod.rs b/core/lib/vm_executor/src/batch/executor.rs similarity index 53% rename from core/node/state_keeper/src/batch_executor/mod.rs rename to core/lib/vm_executor/src/batch/executor.rs index 235a8f581c82..6dc9354fd7db 100644 --- a/core/node/state_keeper/src/batch_executor/mod.rs +++ b/core/lib/vm_executor/src/batch/executor.rs @@ -1,82 +1,32 @@ -use std::{error::Error as StdError, fmt, sync::Arc}; +use std::{error::Error as StdError, sync::Arc}; use anyhow::Context as _; +use async_trait::async_trait; use tokio::{ sync::{mpsc, oneshot}, task::JoinHandle, }; use zksync_multivm::interface::{ - storage::StorageViewCache, Call, CompressedBytecodeInfo, FinishedL1Batch, Halt, L1BatchEnv, - L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, + executor::BatchExecutor, + storage::{ReadStorage, StorageView}, + BatchTransactionExecutionResult, FinishedL1Batch, L2BlockEnv, }; -use zksync_state::OwnedStorage; use zksync_types::Transaction; -use crate::{ - metrics::{ExecutorCommand, EXECUTOR_METRICS}, - types::ExecutionMetricsForCriteria, -}; - -pub mod main_executor; -#[cfg(test)] -mod tests; - -/// Representation of a transaction executed in the virtual machine. -#[derive(Debug, Clone)] -pub enum TxExecutionResult { - /// Successful execution of the tx and the block tip dry run. - Success { - tx_result: Box, - tx_metrics: Box, - compressed_bytecodes: Vec, - call_tracer_result: Vec, - gas_remaining: u32, - }, - /// The VM rejected the tx for some reason. - RejectedByVm { reason: Halt }, - /// Bootloader gas limit is not enough to execute the tx. - BootloaderOutOfGasForTx, -} - -impl TxExecutionResult { - /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. - pub(super) fn err(&self) -> Option<&Halt> { - match self { - Self::Success { .. } => None, - Self::RejectedByVm { - reason: rejection_reason, - } => Some(rejection_reason), - Self::BootloaderOutOfGasForTx => Some(&Halt::BootloaderOutOfGas), - } - } -} - -/// An abstraction that allows us to create different kinds of batch executors. -/// The only requirement is to return a [`BatchExecutorHandle`], which does its work -/// by communicating with the externally initialized thread. -/// -/// This type is generic over the storage type accepted to create the VM instance, mostly for testing purposes. 
-pub trait BatchExecutor: 'static + Send + Sync + fmt::Debug { - fn init_batch( - &mut self, - storage: S, - l1_batch_params: L1BatchEnv, - system_env: SystemEnv, - ) -> BatchExecutorHandle; -} +use super::metrics::{ExecutorCommand, EXECUTOR_METRICS}; #[derive(Debug)] -enum HandleOrError { - Handle(JoinHandle>), +enum HandleOrError { + Handle(JoinHandle>>), Err(Arc), } -impl HandleOrError { +impl HandleOrError { async fn wait_for_error(&mut self) -> anyhow::Error { let err_arc = match self { Self::Handle(handle) => { let err = match handle.await { - Ok(Ok(())) => anyhow::anyhow!("batch executor unexpectedly stopped"), + Ok(Ok(_)) => anyhow::anyhow!("batch executor unexpectedly stopped"), Ok(Err(err)) => err, Err(err) => anyhow::Error::new(err).context("batch executor panicked"), }; @@ -90,7 +40,7 @@ impl HandleOrError { anyhow::Error::new(err_arc) } - async fn wait(self) -> anyhow::Result<()> { + async fn wait(self) -> anyhow::Result> { match self { Self::Handle(handle) => handle.await.context("batch executor panicked")?, Self::Err(err_arc) => Err(anyhow::Error::new(err_arc)), @@ -98,21 +48,16 @@ impl HandleOrError { } } -/// A public interface for interaction with the `BatchExecutor`. -/// `BatchExecutorHandle` is stored in the state keeper and is used to invoke or rollback transactions, and also seal -/// the batches. +/// "Main" [`BatchExecutor`] implementation instantiating a VM in a blocking Tokio thread. #[derive(Debug)] -pub struct BatchExecutorHandle { - handle: HandleOrError, +pub struct MainBatchExecutor { + handle: HandleOrError, commands: mpsc::Sender, } -impl BatchExecutorHandle { - /// Creates a batch executor handle from the provided sender and thread join handle. - /// Can be used to inject an alternative batch executor implementation. - #[doc(hidden)] - pub(super) fn from_raw( - handle: JoinHandle>, +impl MainBatchExecutor { + pub(super) fn new( + handle: JoinHandle>>, commands: mpsc::Sender, ) -> Self { Self { @@ -120,9 +65,18 @@ impl BatchExecutorHandle { commands, } } +} +#[async_trait] +impl BatchExecutor for MainBatchExecutor +where + S: ReadStorage + Send + 'static, +{ #[tracing::instrument(skip_all)] - pub async fn execute_tx(&mut self, tx: Transaction) -> anyhow::Result { + async fn execute_tx( + &mut self, + tx: Transaction, + ) -> anyhow::Result { let tx_gas_limit = tx.gas_limit().as_u64(); let (response_sender, response_receiver) = oneshot::channel(); @@ -144,9 +98,9 @@ impl BatchExecutorHandle { }; let elapsed = latency.observe(); - if let TxExecutionResult::Success { tx_metrics, .. } = &res { - let gas_per_nanosecond = tx_metrics.execution_metrics.computational_gas_used as f64 - / elapsed.as_nanos() as f64; + if !res.tx_result.result.is_failed() { + let gas_per_nanosecond = + res.tx_result.statistics.computational_gas_used as f64 / elapsed.as_nanos() as f64; EXECUTOR_METRICS .computational_gas_per_nanosecond .observe(gas_per_nanosecond); @@ -162,13 +116,13 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()> { + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation // indeed has been processed. 
let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands - .send(Command::StartNextL2Block(env, response_sender)) + .send(Command::RollbackLastTx(response_sender)) .await .is_err(); if send_failed { @@ -176,7 +130,7 @@ impl BatchExecutorHandle { } let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::StartNextL2Block] + [&ExecutorCommand::RollbackLastTx] .start(); if response_receiver.await.is_err() { return Err(self.handle.wait_for_error().await); @@ -186,13 +140,13 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()> { // While we don't get anything from the channel, it's useful to have it as a confirmation that the operation // indeed has been processed. let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands - .send(Command::RollbackLastTx(response_sender)) + .send(Command::StartNextL2Block(env, response_sender)) .await .is_err(); if send_failed { @@ -200,7 +154,7 @@ impl BatchExecutorHandle { } let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::RollbackLastTx] + [&ExecutorCommand::StartNextL2Block] .start(); if response_receiver.await.is_err() { return Err(self.handle.wait_for_error().await); @@ -210,7 +164,9 @@ impl BatchExecutorHandle { } #[tracing::instrument(skip_all)] - pub async fn finish_batch(mut self) -> anyhow::Result { + async fn finish_batch( + mut self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { let (response_sender, response_receiver) = oneshot::channel(); let send_failed = self .commands @@ -228,44 +184,19 @@ impl BatchExecutorHandle { Ok(batch) => batch, Err(_) => return Err(self.handle.wait_for_error().await), }; - self.handle.wait().await?; - latency.observe(); - Ok(finished_batch) - } - - pub async fn finish_batch_with_cache( - mut self, - ) -> anyhow::Result<(FinishedL1Batch, StorageViewCache)> { - let (response_sender, response_receiver) = oneshot::channel(); - let send_failed = self - .commands - .send(Command::FinishBatchWithCache(response_sender)) - .await - .is_err(); - if send_failed { - return Err(self.handle.wait_for_error().await); - } - - let latency = EXECUTOR_METRICS.batch_executor_command_response_time - [&ExecutorCommand::FinishBatchWithCache] - .start(); - let batch_with_cache = match response_receiver.await { - Ok(batch_with_cache) => batch_with_cache, - Err(_) => return Err(self.handle.wait_for_error().await), - }; - - self.handle.wait().await?; - latency.observe(); - Ok(batch_with_cache) + let storage_view = self.handle.wait().await?; + Ok((finished_batch, storage_view)) } } #[derive(Debug)] pub(super) enum Command { - ExecuteTx(Box, oneshot::Sender), + ExecuteTx( + Box, + oneshot::Sender, + ), StartNextL2Block(L2BlockEnv, oneshot::Sender<()>), RollbackLastTx(oneshot::Sender<()>), FinishBatch(oneshot::Sender), - FinishBatchWithCache(oneshot::Sender<(FinishedL1Batch, StorageViewCache)>), } diff --git a/core/node/state_keeper/src/batch_executor/main_executor.rs b/core/lib/vm_executor/src/batch/factory.rs similarity index 68% rename from core/node/state_keeper/src/batch_executor/main_executor.rs rename to core/lib/vm_executor/src/batch/factory.rs index 7d1bf5f47b17..17b125b0c41a 100644 --- a/core/node/state_keeper/src/batch_executor/main_executor.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,31 +1,31 @@ -use 
std::sync::Arc; +use std::{marker::PhantomData, rc::Rc, sync::Arc}; use anyhow::Context as _; use once_cell::sync::OnceCell; use tokio::sync::mpsc; use zksync_multivm::{ interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, storage::{ReadStorage, StorageView}, - Call, CompressedBytecodeInfo, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, - L2BlockEnv, SystemEnv, VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, }, tracers::CallTracer, vm_latest::HistoryEnabled, MultiVMTracer, VmInstance, }; -use zksync_shared_metrics::{InteractionType, TxStage, APP_METRICS}; use zksync_types::{vm::FastVmMode, Transaction}; -use super::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}; -use crate::{ - metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, - types::ExecutionMetricsForCriteria, +use super::{ + executor::{Command, MainBatchExecutor}, + metrics::{TxExecutionStage, BATCH_TIP_METRICS, KEEPER_METRICS}, }; +use crate::batch::metrics::{InteractionType, EXECUTOR_METRICS}; -/// The default implementation of [`BatchExecutor`]. -/// Creates a "real" batch executor which maintains the VM (as opposed to the test builder which doesn't use the VM). +/// The default implementation of [`BatchExecutorFactory`]. +/// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). #[derive(Debug, Clone)] -pub struct MainBatchExecutor { +pub struct MainBatchExecutorFactory { save_call_traces: bool, /// Whether batch executor would allow transactions with bytecode that cannot be compressed. /// For new blocks, bytecode compression is mandatory -- if bytecode compression is not supported, @@ -37,7 +37,7 @@ pub struct MainBatchExecutor { fast_vm_mode: FastVmMode, } -impl MainBatchExecutor { +impl MainBatchExecutorFactory { pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self { Self { save_call_traces, @@ -56,13 +56,13 @@ impl MainBatchExecutor { } } -impl BatchExecutor for MainBatchExecutor { +impl BatchExecutorFactory for MainBatchExecutorFactory { fn init_batch( &mut self, storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) -> BatchExecutorHandle { + ) -> Box> { // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued // until a previous command is processed), capacity 1 is enough for the commands channel. let (commands_sender, commands_receiver) = mpsc::channel(1); @@ -71,21 +71,15 @@ impl BatchExecutor for MainBatchExecutor { optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, commands: commands_receiver, + _storage: PhantomData, }; let handle = tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env)); - BatchExecutorHandle::from_raw(handle, commands_sender) + Box::new(MainBatchExecutor::new(handle, commands_sender)) } } -#[derive(Debug)] -struct TransactionOutput { - tx_result: VmExecutionResultAndLogs, - compressed_bytecodes: Vec, - calls: Vec, -} - /// Implementation of the "primary" (non-test) batch executor. /// Upon launch, it initializes the VM object with provided block context and properties, and keeps invoking the commands /// sent to it one by one until the batch is finished. 
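// Illustrative, self-contained sketch (toy types, not part of this diff) of the
// request/response pattern described above: each command carries a `oneshot` sender for
// its reply, and a capacity-1 channel suffices because commands are processed strictly
// one at a time on the blocking thread.
//
//     use tokio::sync::{mpsc, oneshot};
//
//     enum Command {
//         Execute(String, oneshot::Sender<usize>),
//     }
//
//     #[tokio::main]
//     async fn main() {
//         let (cmd_tx, mut cmd_rx) = mpsc::channel::<Command>(1);
//         let worker = tokio::task::spawn_blocking(move || {
//             while let Some(Command::Execute(payload, reply)) = cmd_rx.blocking_recv() {
//                 let _ = reply.send(payload.len()); // stand-in for VM work
//             }
//         });
//         let (reply_tx, reply_rx) = oneshot::channel();
//         cmd_tx.send(Command::Execute("tx".into(), reply_tx)).await.unwrap();
//         assert_eq!(reply_rx.await.unwrap(), 2);
//         drop(cmd_tx); // closing the channel lets the worker exit
//         worker.await.unwrap();
//     }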
@@ -93,20 +87,21 @@ struct TransactionOutput { /// One `CommandReceiver` can execute exactly one batch, so once the batch is sealed, a new `CommandReceiver` object must /// be constructed. #[derive(Debug)] -struct CommandReceiver { +struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, commands: mpsc::Receiver, + _storage: PhantomData, } -impl CommandReceiver { - pub(super) fn run( +impl CommandReceiver { + pub(super) fn run( mut self, storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, - ) -> anyhow::Result<()> { + ) -> anyhow::Result> { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); let storage_view = StorageView::new(storage).to_rc_ptr(); @@ -116,13 +111,15 @@ impl CommandReceiver { storage_view.clone(), self.fast_vm_mode, ); + let mut batch_finished = false; while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { - let result = self - .execute_tx(&tx, &mut vm) - .with_context(|| format!("fatal error executing transaction {tx:?}"))?; + let tx_hash = tx.hash(); + let result = self.execute_tx(*tx, &mut vm).with_context(|| { + format!("fatal error executing transaction {tx_hash:?}") + })?; if resp.send(result).is_err() { break; } @@ -144,36 +141,34 @@ impl CommandReceiver { if resp.send(vm_block_result).is_err() { break; } - - // `storage_view` cannot be accessed while borrowed by the VM, - // so this is the only point at which storage metrics can be obtained - let metrics = storage_view.as_ref().borrow_mut().metrics(); - EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] - .observe(metrics.time_spent_on_get_value); - EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] - .observe(metrics.time_spent_on_set_value); - return Ok(()); - } - Command::FinishBatchWithCache(resp) => { - let vm_block_result = self.finish_batch(&mut vm)?; - let cache = (*storage_view).borrow().cache(); - if resp.send((vm_block_result, cache)).is_err() { - break; - } - return Ok(()); + batch_finished = true; + break; } } } - // State keeper can exit because of stop signal, so it's OK to exit mid-batch. - tracing::info!("State keeper exited with an unfinished L1 batch"); - Ok(()) + + drop(vm); + let storage_view = Rc::into_inner(storage_view) + .context("storage view leaked")? + .into_inner(); + if batch_finished { + let metrics = storage_view.metrics(); + EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] + .observe(metrics.time_spent_on_get_value); + EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] + .observe(metrics.time_spent_on_set_value); + } else { + // State keeper can exit because of stop signal, so it's OK to exit mid-batch. + tracing::info!("State keeper exited with an unfinished L1 batch"); + } + Ok(storage_view) } - fn execute_tx( + fn execute_tx( &self, - tx: &Transaction, + transaction: Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). vm.pop_snapshot_no_rollback(); @@ -182,47 +177,23 @@ impl CommandReceiver { // Execute the transaction. 
let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::Execution].start(); - let output = if self.optional_bytecode_compression { - self.execute_tx_in_vm_with_optional_compression(tx, vm)? + let result = if self.optional_bytecode_compression { + self.execute_tx_in_vm_with_optional_compression(&transaction, vm)? } else { - self.execute_tx_in_vm(tx, vm)? + self.execute_tx_in_vm(&transaction, vm)? }; latency.observe(); - APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); - APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); - let TransactionOutput { - tx_result, - compressed_bytecodes, - calls, - } = output; - - if let ExecutionResult::Halt { reason } = tx_result.result { - return Ok(match reason { - Halt::BootloaderOutOfGas => TxExecutionResult::BootloaderOutOfGasForTx, - _ => TxExecutionResult::RejectedByVm { reason }, - }); - } - - let tx_metrics = ExecutionMetricsForCriteria::new(Some(tx), &tx_result); - let gas_remaining = tx_result.statistics.gas_remaining; - - Ok(TxExecutionResult::Success { - tx_result: Box::new(tx_result), - tx_metrics: Box::new(tx_metrics), - compressed_bytecodes, - call_tracer_result: calls, - gas_remaining, - }) + Ok(result) } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut VmInstance) { let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::TxRollback].start(); vm.rollback_to_the_latest_snapshot(); latency.observe(); } - fn start_next_l2_block( + fn start_next_l2_block( &self, l2_block_env: L2BlockEnv, vm: &mut VmInstance, @@ -230,7 +201,7 @@ impl CommandReceiver { vm.start_new_l2_block(l2_block_env); } - fn finish_batch( + fn finish_batch( &self, vm: &mut VmInstance, ) -> anyhow::Result { @@ -249,11 +220,11 @@ impl CommandReceiver { /// Attempts to execute transaction with or without bytecode compression. /// If compression fails, the transaction will be re-executed without compression. - fn execute_tx_in_vm_with_optional_compression( + fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. // In order to not let the accounts spam the space of compressed bytecodes with bytecodes @@ -273,14 +244,14 @@ impl CommandReceiver { if let (Ok(compressed_bytecodes), tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) { - let calls = Arc::try_unwrap(call_tracer_result) + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - return Ok(TransactionOutput { - tx_result, + return Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes, - calls, + call_traces, }); } @@ -303,24 +274,24 @@ impl CommandReceiver { // TODO implement tracer manager which will be responsible // for collecting result from all tracers and save it to the database - let calls = Arc::try_unwrap(call_tracer_result) + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - Ok(TransactionOutput { - tx_result, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes, - calls, + call_traces, }) } /// Attempts to execute transaction with mandatory bytecode compression. 
/// If bytecode compression fails, the transaction will be rejected. - fn execute_tx_in_vm( + fn execute_tx_in_vm( &self, tx: &Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result { let call_tracer_result = Arc::new(OnceCell::default()); let tracer = if self.save_call_traces { vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] @@ -331,24 +302,24 @@ impl CommandReceiver { let (bytecodes_result, mut tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true); if let Ok(compressed_bytecodes) = bytecodes_result { - let calls = Arc::try_unwrap(call_tracer_result) + let call_traces = Arc::try_unwrap(call_tracer_result) .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? .take() .unwrap_or_default(); - Ok(TransactionOutput { - tx_result, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes, - calls, + call_traces, }) } else { // Transaction failed to publish bytecodes, we reject it so initiator doesn't pay fee. tx_result.result = ExecutionResult::Halt { reason: Halt::FailedToPublishCompressedBytecodes, }; - Ok(TransactionOutput { - tx_result, + Ok(BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), compressed_bytecodes: vec![], - calls: vec![], + call_traces: vec![], }) } } diff --git a/core/lib/vm_executor/src/batch/metrics.rs b/core/lib/vm_executor/src/batch/metrics.rs new file mode 100644 index 000000000000..170ed4717989 --- /dev/null +++ b/core/lib/vm_executor/src/batch/metrics.rs @@ -0,0 +1,95 @@ +//! Main batch executor metrics. + +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::VmExecutionResultAndLogs; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "command", rename_all = "snake_case")] +pub(super) enum ExecutorCommand { + ExecuteTx, + #[metrics(name = "start_next_miniblock")] + StartNextL2Block, + RollbackLastTx, + FinishBatch, +} + +const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ + 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., +]); + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(super) enum TxExecutionStage { + Execution, + TxRollback, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "interaction", rename_all = "snake_case")] +pub(super) enum InteractionType { + GetValue, + SetValue, +} + +/// Executor-related metrics. +#[derive(Debug, Metrics)] +#[metrics(prefix = "state_keeper")] +pub(super) struct ExecutorMetrics { + /// Latency to process a single command sent to the batch executor. + #[metrics(buckets = Buckets::LATENCIES)] + pub batch_executor_command_response_time: Family>, + #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] + pub computational_gas_per_nanosecond: Histogram, + #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] + pub failed_tx_gas_limit_per_nanosecond: Histogram, + /// Cumulative latency of interacting with the storage when executing a transaction + /// in the batch executor. + #[metrics(buckets = Buckets::LATENCIES)] + pub batch_storage_interaction_duration: Family>, +} + +#[vise::register] +pub(super) static EXECUTOR_METRICS: vise::Global = vise::Global::new(); + +/// Some more executor-related metrics with differing prefix. 
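+/// (With the `server_state_keeper` prefix below, e.g. `tx_execution_time` is exported
+/// as `server_state_keeper_tx_execution_time`; presumably the old prefix is kept so
+/// that existing dashboards continue to work.)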
+#[derive(Debug, Metrics)] +#[metrics(prefix = "server_state_keeper")] +pub(super) struct StateKeeperMetrics { + /// Time spent by the state keeper on transaction execution. + #[metrics(buckets = Buckets::LATENCIES)] + pub tx_execution_time: Family>, +} + +#[vise::register] +pub(super) static KEEPER_METRICS: vise::Global = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "batch_tip")] +pub(super) struct BatchTipMetrics { + #[metrics(buckets = Buckets::exponential(60000.0..=80000000.0, 2.0))] + gas_used: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] + pubdata_published: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] + circuit_statistic: Histogram, + #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] + execution_metrics_size: Histogram, +} + +impl BatchTipMetrics { + pub fn observe(&self, execution_result: &VmExecutionResultAndLogs) { + self.gas_used + .observe(execution_result.statistics.gas_used as usize); + self.pubdata_published + .observe(execution_result.statistics.pubdata_published as usize); + self.circuit_statistic + .observe(execution_result.statistics.circuit_statistic.total()); + self.execution_metrics_size + .observe(execution_result.get_execution_metrics(None).size()); + } +} + +#[vise::register] +pub(super) static BATCH_TIP_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/vm_executor/src/batch/mod.rs b/core/lib/vm_executor/src/batch/mod.rs new file mode 100644 index 000000000000..2407d2daba2c --- /dev/null +++ b/core/lib/vm_executor/src/batch/mod.rs @@ -0,0 +1,9 @@ +//! Main implementation of ZKsync VM [batch executor](crate::interface::BatchExecutor). +//! +//! This implementation is used by various ZKsync components, like the state keeper and components based on the VM runner. + +pub use self::{executor::MainBatchExecutor, factory::MainBatchExecutorFactory}; + +mod executor; +mod factory; +mod metrics; diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs new file mode 100644 index 000000000000..24fb3d8f7eee --- /dev/null +++ b/core/lib/vm_executor/src/lib.rs @@ -0,0 +1,9 @@ +//! Implementations of ZKsync VM executors and executor-related utils. +//! +//! The included implementations are separated from the respective interfaces since they depend +//! on [VM implementations](zksync_multivm), are aware of ZKsync node storage etc. + +pub use zksync_multivm::interface::executor as interface; + +pub mod batch; +pub mod storage; diff --git a/core/lib/vm_utils/src/storage.rs b/core/lib/vm_executor/src/storage.rs similarity index 98% rename from core/lib/vm_utils/src/storage.rs rename to core/lib/vm_executor/src/storage.rs index 1e43543bc5aa..e39748786a30 100644 --- a/core/lib/vm_utils/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -1,13 +1,15 @@ +//! Utils to get data for L1 batch execution from storage. 
+
 use std::time::{Duration, Instant};

 use anyhow::Context;
 use zksync_contracts::BaseSystemContracts;
 use zksync_dal::{Connection, Core, CoreDal, DalError};
+use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode};
 use zksync_types::{
     block::L2BlockHeader, fee_model::BatchFeeInput, snapshots::SnapshotRecoveryStatus, Address,
     L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE,
 };
-use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode};

 const BATCH_COMPUTATIONAL_GAS_LIMIT: u32 = u32::MAX;

diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml
index 8fc7883f1df7..694576dca3b0 100644
--- a/core/lib/vm_interface/Cargo.toml
+++ b/core/lib/vm_interface/Cargo.toml
@@ -15,6 +15,8 @@ zksync_contracts.workspace = true
 zksync_system_constants.workspace = true
 zksync_types.workspace = true

+anyhow.workspace = true
+async-trait.workspace = true
 hex.workspace = true
 serde.workspace = true
 thiserror.workspace = true

diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs
new file mode 100644
index 000000000000..ee6665abfcb1
--- /dev/null
+++ b/core/lib/vm_interface/src/executor.rs
@@ -0,0 +1,44 @@
+//! High-level executor traits.
+
+use std::fmt;
+
+use async_trait::async_trait;
+use zksync_types::Transaction;
+
+use crate::{
+    storage::StorageView, BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv,
+    SystemEnv,
+};
+
+/// Factory of [`BatchExecutor`]s.
+pub trait BatchExecutorFactory<S: Send + 'static>: 'static + Send + fmt::Debug {
+    /// Initializes an executor for a batch with the specified params and using the provided storage.
+    fn init_batch(
+        &mut self,
+        storage: S,
+        l1_batch_params: L1BatchEnv,
+        system_env: SystemEnv,
+    ) -> Box<dyn BatchExecutor<S>>;
+}
+
+/// Handle for executing a single L1 batch.
+///
+/// The handle is parameterized by the transaction execution output in order to be able to represent different
+/// levels of abstraction.
+#[async_trait]
+pub trait BatchExecutor<S>: 'static + Send + fmt::Debug {
+    /// Executes a transaction.
+    async fn execute_tx(
+        &mut self,
+        tx: Transaction,
+    ) -> anyhow::Result<BatchTransactionExecutionResult>;
+
+    /// Rolls back the last executed transaction.
+    async fn rollback_last_tx(&mut self) -> anyhow::Result<()>;
+
+    /// Starts the next L2 block with the specified params.
+    async fn start_next_l2_block(&mut self, env: L2BlockEnv) -> anyhow::Result<()>;
+
+    /// Finishes the current L1 batch.
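+    ///
+    /// Note that this takes `self: Box<Self>`, i.e. finishing consumes the executor;
+    /// no further commands can be issued for the batch afterwards.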
+ async fn finish_batch(self: Box) -> anyhow::Result<(FinishedL1Batch, StorageView)>; +} diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index dba93a49ec86..315eb2bb36a7 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -28,17 +28,18 @@ pub use crate::{ VmExecutionMode, }, outputs::{ - BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, - CurrentExecutionState, DeduplicatedWritesMetrics, ExecutionResult, FinishedL1Batch, - L2Block, Refunds, TransactionExecutionMetrics, TransactionExecutionResult, - TxExecutionStatus, VmEvent, VmExecutionLogs, VmExecutionMetrics, - VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, + BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, + CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, + ExecutionResult, FinishedL1Batch, L2Block, Refunds, TransactionExecutionMetrics, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled}, }; +pub mod executor; pub mod storage; mod types; mod vm; diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index 691a9d442ca8..101f5c82f497 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -102,7 +102,7 @@ where } } -impl StorageView { +impl StorageView { /// Creates a new storage view based on the underlying storage. pub fn new(storage_handle: S) -> Self { Self { diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 37e122c6d9d9..d74d74652e28 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -297,6 +297,21 @@ impl Call { } } +/// Mid-level transaction execution output returned by a batch executor. +#[derive(Debug, Clone)] +pub struct BatchTransactionExecutionResult { + pub tx_result: Box, + pub compressed_bytecodes: Vec, + pub call_traces: Vec, +} + +impl BatchTransactionExecutionResult { + pub fn was_halted(&self) -> bool { + matches!(self.tx_result.result, ExecutionResult::Halt { .. }) + } +} + +/// High-level transaction execution result used by the API server sandbox etc. #[derive(Debug, Clone, PartialEq)] pub struct TransactionExecutionResult { pub transaction: Transaction, diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 9c0afc6659f0..27241c2c0fae 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -1,6 +1,7 @@ use zksync_types::writes::StateDiffRecord; use super::{BootloaderMemory, CurrentExecutionState, VmExecutionResultAndLogs}; +use crate::{ExecutionResult, Refunds, VmExecutionLogs, VmExecutionStatistics}; /// State of the VM after the batch execution. #[derive(Debug, Clone)] @@ -16,3 +17,28 @@ pub struct FinishedL1Batch { /// List of state diffs. Could be none for old versions of the VM. 
pub state_diffs: Option>, } + +impl FinishedL1Batch { + pub fn mock() -> Self { + FinishedL1Batch { + block_tip_execution_result: VmExecutionResultAndLogs { + result: ExecutionResult::Success { output: vec![] }, + logs: VmExecutionLogs::default(), + statistics: VmExecutionStatistics::default(), + refunds: Refunds::default(), + }, + final_execution_state: CurrentExecutionState { + events: vec![], + deduplicated_storage_logs: vec![], + used_contract_hashes: vec![], + user_l2_to_l1_logs: vec![], + system_logs: vec![], + storage_refunds: Vec::new(), + pubdata_costs: Vec::new(), + }, + final_bootloader_memory: Some(vec![]), + pubdata_input: Some(vec![]), + state_diffs: Some(vec![]), + } + } +} diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index d24e1440f836..abefa59bbe7e 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,8 +1,9 @@ pub use self::{ bytecode::CompressedBytecodeInfo, execution_result::{ - Call, CallType, ExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus, - VmEvent, VmExecutionLogs, VmExecutionResultAndLogs, + BatchTransactionExecutionResult, Call, CallType, ExecutionResult, Refunds, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionResultAndLogs, }, execution_state::{BootloaderMemory, CurrentExecutionState}, finished_l1batch::FinishedL1Batch, diff --git a/core/lib/vm_utils/src/lib.rs b/core/lib/vm_utils/src/lib.rs deleted file mode 100644 index 30f61eb69f21..000000000000 --- a/core/lib/vm_utils/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod storage; diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 0537aaabc563..90063772da92 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -30,14 +30,15 @@ use zksync_node_sync::{ }; use zksync_node_test_utils::{create_l1_batch_metadata, l1_batch_metadata_to_commitment_artifacts}; use zksync_state_keeper::{ + executor::MainBatchExecutorFactory, io::{IoCursor, L1BatchParams, L2BlockParams}, seal_criteria::NoopSealer, testonly::{ fund, l1_transaction, l2_transaction, test_batch_executor::MockReadStorageFactory, MockBatchExecutor, }, - AsyncRocksdbCache, MainBatchExecutor, OutputHandler, StateKeeperPersistence, - TreeWritesPersistence, ZkSyncStateKeeper, + AsyncRocksdbCache, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, + ZkSyncStateKeeper, }; use zksync_test_account::Account; use zksync_types::{ @@ -592,12 +593,13 @@ impl StateKeeperRunner { }); s.spawn_bg({ + let executor_factory = MainBatchExecutorFactory::new(false, false); let stop_recv = stop_recv.clone(); async { ZkSyncStateKeeper::new( stop_recv, Box::new(io), - Box::new(MainBatchExecutor::new(false, false)), + Box::new(executor_factory), OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(self.sync_state.clone())), Arc::new(NoopSealer), diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 3a81a578c033..f9efb22bd610 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -38,6 +38,7 @@ zksync_eth_sender.workspace = true zksync_da_client.workspace = true zksync_da_dispatcher.workspace = true zksync_block_reverter.workspace = true +zksync_vm_executor.workspace = true zksync_state_keeper.workspace = true zksync_consistency_checker.workspace = true zksync_metadata_calculator.workspace = true diff 
--git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index 3288b68bdebb..f369db2bbf01 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,5 +1,5 @@ -use zksync_state_keeper::MainBatchExecutor; use zksync_types::vm::FastVmMode; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, @@ -39,8 +39,10 @@ impl WiringLayer for MainBatchExecutorLayer { } async fn wire(self, (): Self::Input) -> Result { - let mut executor = - MainBatchExecutor::new(self.save_call_traces, self.optional_bytecode_compression); + let mut executor = MainBatchExecutorFactory::new( + self.save_call_traces, + self.optional_bytecode_compression, + ); executor.set_fast_vm_mode(self.fast_vm_mode); Ok(executor.into()) } diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs index a77344f3706e..55defd095be8 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mod.rs @@ -1,20 +1,14 @@ use std::sync::Arc; use anyhow::Context; -use zksync_state::{AsyncCatchupTask, ReadStorageFactory}; +pub use zksync_state::RocksdbStorageOptions; +use zksync_state::{AsyncCatchupTask, OwnedStorage, ReadStorageFactory}; use zksync_state_keeper::{ - seal_criteria::ConditionalSealer, AsyncRocksdbCache, BatchExecutor, OutputHandler, - StateKeeperIO, ZkSyncStateKeeper, + seal_criteria::ConditionalSealer, AsyncRocksdbCache, OutputHandler, StateKeeperIO, + ZkSyncStateKeeper, }; use zksync_storage::RocksDB; - -pub mod external_io; -pub mod main_batch_executor; -pub mod mempool_io; -pub mod output_handler; - -// Public re-export to not require the user to directly depend on `zksync_state`. -pub use zksync_state::RocksdbStorageOptions; +use zksync_vm_executor::interface::BatchExecutorFactory; use crate::{ implementations::resources::{ @@ -30,6 +24,11 @@ use crate::{ FromContext, IntoContext, }; +pub mod external_io; +pub mod main_batch_executor; +pub mod mempool_io; +pub mod output_handler; + /// Wiring layer for the state keeper. 
#[derive(Debug)] pub struct StateKeeperLayer { @@ -102,7 +101,7 @@ impl WiringLayer for StateKeeperLayer { let state_keeper = StateKeeperTask { io, - batch_executor: batch_executor_base, + executor_factory: batch_executor_base, output_handler, sealer, storage_factory: Arc::new(storage_factory), @@ -125,7 +124,7 @@ impl WiringLayer for StateKeeperLayer { #[derive(Debug)] pub struct StateKeeperTask { io: Box, - batch_executor: Box, + executor_factory: Box>, output_handler: OutputHandler, sealer: Arc, storage_factory: Arc, @@ -141,7 +140,7 @@ impl Task for StateKeeperTask { let state_keeper = ZkSyncStateKeeper::new( stop_receiver.0, self.io, - self.batch_executor, + self.executor_factory, self.output_handler, self.sealer, self.storage_factory, diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index ee2fb84416e1..858692d3c854 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -1,6 +1,6 @@ use zksync_config::configs::vm_runner::BasicWitnessInputProducerConfig; -use zksync_state_keeper::MainBatchExecutor; use zksync_types::L2ChainId; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use zksync_vm_runner::{ impls::{BasicWitnessInputProducer, BasicWitnessInputProducerIo}, ConcurrentOutputHandlerFactoryTask, StorageSyncTask, @@ -76,12 +76,12 @@ impl WiringLayer for BasicWitnessInputProducerLayer { let connection_pool = master_pool.get_custom(self.config.window_size + 2).await?; // We don't get the executor from the context because it would contain state keeper-specific settings. - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let batch_executor = MainBatchExecutorFactory::new(false, false); let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( connection_pool, object_store.0, - batch_executor, + Box::new(batch_executor), self.config.db_path, self.zksync_network_id, self.config.first_processed_batch, diff --git a/core/node/node_framework/src/implementations/resources/state_keeper.rs b/core/node/node_framework/src/implementations/resources/state_keeper.rs index 5db570d7989b..eed0e022774d 100644 --- a/core/node/node_framework/src/implementations/resources/state_keeper.rs +++ b/core/node/node_framework/src/implementations/resources/state_keeper.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_state_keeper::{ - seal_criteria::ConditionalSealer, BatchExecutor, OutputHandler, StateKeeperIO, -}; +use zksync_state::OwnedStorage; +use zksync_state_keeper::{seal_criteria::ConditionalSealer, OutputHandler, StateKeeperIO}; +use zksync_vm_executor::interface::BatchExecutorFactory; use crate::resource::{Resource, Unique}; @@ -23,10 +23,10 @@ impl From for StateKeeperIOResource { } } -/// A resource that provides [`BatchExecutor`] implementation to the service. +/// A resource that provides [`BatchExecutorFactory`] implementation to the service. /// This resource is unique, e.g. it's expected to be consumed by a single service. 
#[derive(Debug, Clone)] -pub struct BatchExecutorResource(pub Unique>); +pub struct BatchExecutorResource(pub Unique>>); impl Resource for BatchExecutorResource { fn name() -> String { @@ -34,7 +34,10 @@ impl Resource for BatchExecutorResource { } } -impl From for BatchExecutorResource { +impl From for BatchExecutorResource +where + T: BatchExecutorFactory, +{ fn from(executor: T) -> Self { Self(Unique::new(Box::new(executor))) } diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 5f1ae04c5f50..ccfc8dd8a4e9 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -25,7 +25,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_concurrency.workspace = true vise.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 50734421341e..b7b8930c4957 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -20,7 +20,7 @@ use zksync_types::{ L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; use zksync_utils::bytes_to_be_words; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use super::{ client::MainNodeClient, diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index edd8306e72e0..d9a98c2bce36 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -121,15 +121,15 @@ impl StateKeeperHandles { .unwrap(); let (stop_sender, stop_receiver) = watch::channel(false); - let mut batch_executor_base = TestBatchExecutorBuilder::default(); + let mut batch_executor = TestBatchExecutorBuilder::default(); for &tx_hashes_in_l1_batch in tx_hashes { - batch_executor_base.push_successful_transactions(tx_hashes_in_l1_batch); + batch_executor.push_successful_transactions(tx_hashes_in_l1_batch); } let state_keeper = ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(NoopSealer), Arc::new(MockReadStorageFactory), diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 16eb657bc9b7..1810cc00de51 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -28,7 +28,7 @@ zksync_protobuf.workspace = true zksync_test_account.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true zksync_system_constants.workspace = true zksync_base_token_adjuster.workspace = true diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs new file mode 100644 index 000000000000..2fa5c3b9c128 --- /dev/null +++ b/core/node/state_keeper/src/executor/mod.rs @@ -0,0 +1,60 @@ +use zksync_multivm::interface::{ + BatchTransactionExecutionResult, Call, CompressedBytecodeInfo, ExecutionResult, Halt, + VmExecutionResultAndLogs, +}; +use zksync_types::Transaction; +pub use zksync_vm_executor::batch::MainBatchExecutorFactory; + +use crate::ExecutionMetricsForCriteria; + +#[cfg(test)] +mod tests; + +/// State keeper representation of a transaction executed in the virtual machine. +/// +/// A separate type allows to be more typesafe when dealing with halted transactions. 
It also simplifies testing seal criteria +/// (i.e., without picking transactions that actually produce appropriate `ExecutionMetricsForCriteria`). +#[derive(Debug, Clone)] +pub enum TxExecutionResult { + /// Successful execution of the tx and the block tip dry run. + Success { + tx_result: Box, + tx_metrics: Box, + compressed_bytecodes: Vec, + call_tracer_result: Vec, + gas_remaining: u32, + }, + /// The VM rejected the tx for some reason. + RejectedByVm { reason: Halt }, + /// Bootloader gas limit is not enough to execute the tx. + BootloaderOutOfGasForTx, +} + +impl TxExecutionResult { + pub(crate) fn new(res: BatchTransactionExecutionResult, tx: &Transaction) -> Self { + match res.tx_result.result { + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas, + } => Self::BootloaderOutOfGasForTx, + ExecutionResult::Halt { reason } => Self::RejectedByVm { reason }, + _ => Self::Success { + tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), + gas_remaining: res.tx_result.statistics.gas_remaining, + tx_result: res.tx_result, + compressed_bytecodes: res.compressed_bytecodes, + call_tracer_result: res.call_traces, + }, + } + } + + /// Returns a revert reason if either transaction was rejected or bootloader ran out of gas. + pub(super) fn err(&self) -> Option<&Halt> { + match self { + Self::Success { .. } => None, + Self::RejectedByVm { + reason: rejection_reason, + } => Some(rejection_reason), + Self::BootloaderOutOfGasForTx => Some(&Halt::BootloaderOutOfGas), + } + } +} diff --git a/core/node/state_keeper/src/batch_executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs similarity index 92% rename from core/node/state_keeper/src/batch_executor/tests/mod.rs rename to core/node/state_keeper/src/executor/tests/mod.rs index ab9115991deb..90ce236a38f8 100644 --- a/core/node/state_keeper/src/batch_executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -1,35 +1,38 @@ +// FIXME: move storage-agnostic tests to VM executor crate + use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core}; +use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt}; use zksync_test_account::Account; use zksync_types::{ get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, }; use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; -use super::TxExecutionResult; mod read_storage_factory; mod tester; /// Ensures that the transaction was executed successfully. -fn assert_executed(execution_result: &TxExecutionResult) { - assert_matches!(execution_result, TxExecutionResult::Success { .. }); +fn assert_executed(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!( + result, + ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } + ); } /// Ensures that the transaction was rejected by the VM. -fn assert_rejected(execution_result: &TxExecutionResult) { - assert_matches!(execution_result, TxExecutionResult::RejectedByVm { .. }); +fn assert_rejected(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!(result, ExecutionResult::Halt { reason } if !matches!(reason, Halt::BootloaderOutOfGas)); } /// Ensures that the transaction was executed successfully but reverted by the VM. 
-fn assert_reverted(execution_result: &TxExecutionResult) { - assert_executed(execution_result); - if let TxExecutionResult::Success { tx_result, .. } = execution_result { - assert!(tx_result.result.is_failed()); - } else { - unreachable!(); - } +fn assert_reverted(execution_result: &BatchTransactionExecutionResult) { + let result = &execution_result.tx_result.result; + assert_matches!(result, ExecutionResult::Revert { .. }); } #[derive(Debug, Clone, Copy)] @@ -189,23 +192,11 @@ async fn rollback(vm_mode: FastVmMode) { executor.rollback_last_tx().await.unwrap(); // Execute the same transaction, it must succeed. - let res_new = executor.execute_tx(tx).await.unwrap(); + let res_new = executor.execute_tx(tx.clone()).await.unwrap(); assert_executed(&res_new); - let ( - TxExecutionResult::Success { - tx_metrics: tx_metrics_old, - .. - }, - TxExecutionResult::Success { - tx_metrics: tx_metrics_new, - .. - }, - ) = (res_old, res_new) - else { - unreachable!(); - }; - + let tx_metrics_old = res_old.tx_result.get_execution_metrics(Some(&tx)); + let tx_metrics_new = res_new.tx_result.get_execution_metrics(Some(&tx)); assert_eq!( tx_metrics_old, tx_metrics_new, "Execution results must be the same" @@ -426,7 +417,12 @@ async fn bootloader_out_of_gas_for_any_tx(vm_mode: FastVmMode) { .await; let res = executor.execute_tx(alice.execute()).await.unwrap(); - assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); + assert_matches!( + res.tx_result.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); } /// Checks that we can handle the bootloader out of gas error on tip phase. @@ -447,7 +443,7 @@ async fn bootloader_tip_out_of_gas() { let res = executor.execute_tx(alice.execute()).await.unwrap(); assert_executed(&res); - let finished_batch = executor.finish_batch().await.unwrap(); + let (finished_batch, _) = executor.finish_batch().await.unwrap(); // Just a bit below the gas used for the previous batch execution should be fine to execute the tx // but not enough to execute the block tip. 
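The tests above no longer destructure the removed `TxExecutionResult` enum; they match directly on the `ExecutionResult` carried inside `BatchTransactionExecutionResult`. A minimal sketch of the resulting classification logic, using only the types and variants visible in the hunks above (the `classify` helper itself is illustrative and not part of this patch):

use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt};

fn classify(res: &BatchTransactionExecutionResult) -> &'static str {
    // `tx_result` is the boxed `VmExecutionResultAndLogs` produced by the executor.
    match &res.tx_result.result {
        ExecutionResult::Success { .. } => "executed successfully",
        ExecutionResult::Revert { .. } => "executed, but reverted",
        ExecutionResult::Halt {
            reason: Halt::BootloaderOutOfGas,
        } => "bootloader out of gas",
        ExecutionResult::Halt { .. } => "rejected by the VM",
    }
}
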
@@ -469,7 +465,12 @@ async fn bootloader_tip_out_of_gas() { .await; let res = second_executor.execute_tx(alice.execute()).await.unwrap(); - assert_matches!(res, TxExecutionResult::BootloaderOutOfGasForTx); + assert_matches!( + res.tx_result.result, + ExecutionResult::Halt { + reason: Halt::BootloaderOutOfGas + } + ); } #[tokio::test] diff --git a/core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs b/core/node/state_keeper/src/executor/tests/read_storage_factory.rs similarity index 100% rename from core/node/state_keeper/src/batch_executor/tests/read_storage_factory.rs rename to core/node/state_keeper/src/executor/tests/read_storage_factory.rs diff --git a/core/node/state_keeper/src/batch_executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs similarity index 95% rename from core/node/state_keeper/src/batch_executor/tests/tester.rs rename to core/node/state_keeper/src/executor/tests/tester.rs index e70c8b06fe0d..a00d9ca5ec15 100644 --- a/core/node/state_keeper/src/batch_executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -9,13 +9,16 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ - interface::{L1BatchEnv, L2BlockEnv, SystemEnv}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + L1BatchEnv, L2BlockEnv, SystemEnv, + }, utils::StorageWritesDeduplicator, vm_latest::constants::INITIAL_STORAGE_WRITE_PUBDATA_BYTES, }; use zksync_node_genesis::{create_genesis_l1_batch, GenesisParams}; use zksync_node_test_utils::{recover, Snapshot}; -use zksync_state::{ReadStorageFactory, RocksdbStorageOptions}; +use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, @@ -29,14 +32,14 @@ use zksync_types::{ StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::{read_storage_factory::RocksdbStorageFactory, StorageType}; use crate::{ - batch_executor::{BatchExecutorHandle, TxExecutionResult}, testonly, testonly::BASE_SYSTEM_CONTRACTS, tests::{default_l1_batch_env, default_system_env}, - AsyncRocksdbCache, BatchExecutor, MainBatchExecutor, + AsyncRocksdbCache, }; /// Representation of configuration parameters used by the state keeper. 
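With this change the tester follows the same two-step flow as production code: a `MainBatchExecutorFactory` is configured once, then `init_batch` hands out a per-batch `BatchExecutor`. A sketch of that calling convention, assuming the signatures implied by the hunks below (the `start_batch` helper and its argument sourcing are illustrative):

use zksync_multivm::interface::{
    executor::{BatchExecutor, BatchExecutorFactory},
    L1BatchEnv, SystemEnv,
};
use zksync_state::OwnedStorage;
use zksync_vm_executor::batch::MainBatchExecutorFactory;

fn start_batch(
    storage: OwnedStorage,
    l1_batch_env: L1BatchEnv,
    system_env: SystemEnv,
) -> Box<dyn BatchExecutor<OwnedStorage>> {
    // The two flags mirror the `(save_call_traces, false)` calls in this patch.
    let mut factory = MainBatchExecutorFactory::new(true, false);
    // The factory stays reusable; each call yields an executor for one batch.
    factory.init_batch(storage, l1_batch_env, system_env)
}
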
@@ -97,7 +100,7 @@ impl Tester { pub(super) async fn create_batch_executor( &mut self, storage_type: StorageType, - ) -> BatchExecutorHandle { + ) -> Box> { let (l1_batch_env, system_env) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { @@ -142,8 +145,8 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - ) -> BatchExecutorHandle { - let mut batch_executor = MainBatchExecutor::new(self.config.save_call_traces, false); + ) -> Box> { + let mut batch_executor = MainBatchExecutorFactory::new(self.config.save_call_traces, false); batch_executor.set_fast_vm_mode(self.config.fast_vm_mode); let (_stop_sender, stop_receiver) = watch::channel(false); @@ -158,7 +161,7 @@ impl Tester { pub(super) async fn recover_batch_executor( &mut self, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { let (storage_factory, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -175,7 +178,7 @@ impl Tester { &mut self, storage_type: &StorageType, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { match storage_type { StorageType::AsyncRocksdbCache => self.recover_batch_executor(snapshot).await, StorageType::Rocksdb => { @@ -199,7 +202,7 @@ impl Tester { &self, storage_factory: Arc, snapshot: &SnapshotRecoveryStatus, - ) -> BatchExecutorHandle { + ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; let (mut l1_batch_env, system_env) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); @@ -485,13 +488,10 @@ impl StorageSnapshot { let tx = alice.execute(); let tx_hash = tx.hash(); // probably incorrect let res = executor.execute_tx(tx).await.unwrap(); - if let TxExecutionResult::Success { tx_result, .. 
} = res { - let storage_logs = &tx_result.logs.storage_logs; - storage_writes_deduplicator - .apply(storage_logs.iter().filter(|log| log.log.is_write())); - } else { - panic!("Unexpected tx execution result: {res:?}"); - }; + assert!(!res.was_halted()); + let tx_result = res.tx_result; + let storage_logs = &tx_result.logs.storage_logs; + storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log.is_write())); let mut hasher = L2BlockHasher::new( L2BlockNumber(l2_block_env.number), @@ -506,7 +506,7 @@ impl StorageSnapshot { executor.start_next_l2_block(l2_block_env).await.unwrap(); } - let finished_batch = executor.finish_batch().await.unwrap(); + let (finished_batch, _) = executor.finish_batch().await.unwrap(); let storage_logs = &finished_batch.block_tip_execution_result.logs.storage_logs; storage_writes_deduplicator.apply(storage_logs.iter().filter(|log| log.log.is_write())); let modified_entries = storage_writes_deduplicator.into_modified_key_values(); diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index 4d2907e82913..9ea699234f8f 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -19,7 +19,7 @@ use zksync_types::{ block::L2BlockHasher, protocol_version::ProtocolSemanticVersion, L2ChainId, ProtocolVersion, ProtocolVersionId, }; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use super::*; diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index c3d8dc1dee4d..5734977538bd 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -19,7 +19,7 @@ use zksync_types::{ }; // TODO (SMA-1206): use seconds instead of milliseconds. 
use zksync_utils::time::millis_since_epoch; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::{ io::{ diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index 384b0f45b0f6..f8106fd2423b 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -7,7 +7,7 @@ use zksync_types::{ block::L2BlockExecutionData, fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; -use zksync_vm_utils::storage::l1_batch_params; +use zksync_vm_executor::storage::l1_batch_params; pub use self::{ common::IoCursor, diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index 4dfb7400ffc6..24b1ffca631c 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -352,7 +352,7 @@ mod tests { use assert_matches::assert_matches; use futures::FutureExt; use zksync_dal::CoreDal; - use zksync_multivm::interface::VmExecutionMetrics; + use zksync_multivm::interface::{FinishedL1Batch, VmExecutionMetrics}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_types::{ api::TransactionStatus, block::BlockGasCount, writes::StateDiffRecord, L1BatchNumber, @@ -363,7 +363,6 @@ mod tests { use super::*; use crate::{ io::L2BlockParams, - testonly::default_vm_batch_result, tests::{ create_execution_result, create_transaction, create_updates_manager, default_l1_batch_env, default_system_env, Query, @@ -473,7 +472,7 @@ mod tests { virtual_blocks: 1, }); - let mut batch_result = default_vm_batch_result(); + let mut batch_result = FinishedL1Batch::mock(); batch_result.final_execution_state.deduplicated_storage_logs = storage_logs.iter().map(|log| log.log).collect(); batch_result.state_diffs = Some( diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index a610194ab9ca..02f7f92e070a 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -1,34 +1,34 @@ use std::{ convert::Infallible, - fmt, sync::Arc, time::{Duration, Instant}, }; use anyhow::Context as _; -use async_trait::async_trait; use tokio::sync::watch; use tracing::{info_span, Instrument}; use zksync_multivm::{ - interface::{Halt, L1BatchEnv, SystemEnv}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + Halt, L1BatchEnv, SystemEnv, + }, utils::StorageWritesDeduplicator, }; -use zksync_state::ReadStorageFactory; +use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, }; -use super::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, TxExecutionResult}, +use crate::{ + executor::TxExecutionResult, io::{IoCursor, L1BatchParams, L2BlockParams, OutputHandler, PendingBatchData, StateKeeperIO}, metrics::{AGGREGATION_METRICS, KEEPER_METRICS, L1_BATCH_METRICS}, - seal_criteria::{ConditionalSealer, SealData, SealResolution}, + seal_criteria::{ConditionalSealer, SealData, SealResolution, UnexecutableReason}, types::ExecutionMetricsForCriteria, updates::UpdatesManager, utils::gas_count_from_writes, }; -use crate::seal_criteria::UnexecutableReason; /// Amount of time to block on waiting for some resource. 
The exact value is not really important, /// we only need it to not block on waiting indefinitely and be able to process cancellation requests. @@ -52,45 +52,6 @@ impl Error { } } -/// Functionality [`BatchExecutor`] + [`ReadStorageFactory`] with an erased storage type. This allows to keep -/// [`ZkSyncStateKeeper`] not parameterized by the storage type, simplifying its dependency injection and usage in tests. -#[async_trait] -trait ErasedBatchExecutor: fmt::Debug + Send { - async fn init_batch( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - stop_receiver: &watch::Receiver, - ) -> Result; -} - -/// The only [`ErasedBatchExecutor`] implementation. -#[derive(Debug)] -struct ErasedBatchExecutorImpl { - batch_executor: Box>, - storage_factory: Arc>, -} - -#[async_trait] -impl ErasedBatchExecutor for ErasedBatchExecutorImpl { - async fn init_batch( - &mut self, - l1_batch_env: L1BatchEnv, - system_env: SystemEnv, - stop_receiver: &watch::Receiver, - ) -> Result { - let storage = self - .storage_factory - .access_storage(stop_receiver, l1_batch_env.number - 1) - .await - .context("failed creating VM storage")? - .ok_or(Error::Canceled)?; - Ok(self - .batch_executor - .init_batch(storage, l1_batch_env, system_env)) - } -} - /// State keeper represents a logic layer of L1 batch / L2 block processing flow. /// It's responsible for taking all the data from the `StateKeeperIO`, feeding it into `BatchExecutor` objects /// and calling `SealManager` to decide whether an L2 block or L1 batch should be sealed. @@ -105,28 +66,27 @@ pub struct ZkSyncStateKeeper { stop_receiver: watch::Receiver, io: Box, output_handler: OutputHandler, - batch_executor: Box, + batch_executor: Box>, sealer: Arc, + storage_factory: Arc, } impl ZkSyncStateKeeper { - pub fn new( + pub fn new( stop_receiver: watch::Receiver, sequencer: Box, - batch_executor: Box>, + batch_executor: Box>, output_handler: OutputHandler, sealer: Arc, - storage_factory: Arc>, + storage_factory: Arc, ) -> Self { Self { stop_receiver, io: sequencer, - batch_executor: Box::new(ErasedBatchExecutorImpl { - batch_executor, - storage_factory, - }), + batch_executor, output_handler, sealer, + storage_factory, } } @@ -190,21 +150,20 @@ impl ZkSyncStateKeeper { .await?; let mut batch_executor = self - .batch_executor - .init_batch( - l1_batch_env.clone(), - system_env.clone(), - &self.stop_receiver, - ) - .await?; - self.restore_state(&mut batch_executor, &mut updates_manager, pending_l2_blocks) + .create_batch_executor(l1_batch_env.clone(), system_env.clone()) .await?; + self.restore_state( + &mut *batch_executor, + &mut updates_manager, + pending_l2_blocks, + ) + .await?; let mut l1_batch_seal_delta: Option = None; while !self.is_canceled() { // This function will run until the batch can be sealed. 
self.process_l1_batch( - &mut batch_executor, + &mut *batch_executor, &mut updates_manager, protocol_upgrade_tx, ) @@ -220,12 +179,12 @@ impl ZkSyncStateKeeper { Self::start_next_l2_block( new_l2_block_params, &mut updates_manager, - &mut batch_executor, + &mut *batch_executor, ) .await?; } - let finished_batch = batch_executor.finish_batch().await?; + let (finished_batch, _) = batch_executor.finish_batch().await?; let sealed_batch_protocol_version = updates_manager.protocol_version(); updates_manager.finish_batch(finished_batch); let mut next_cursor = updates_manager.io_cursor(); @@ -244,12 +203,7 @@ impl ZkSyncStateKeeper { (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); batch_executor = self - .batch_executor - .init_batch( - l1_batch_env.clone(), - system_env.clone(), - &self.stop_receiver, - ) + .create_batch_executor(l1_batch_env.clone(), system_env.clone()) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -262,6 +216,22 @@ impl ZkSyncStateKeeper { Err(Error::Canceled) } + async fn create_batch_executor( + &mut self, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + ) -> Result>, Error> { + let storage = self + .storage_factory + .access_storage(&self.stop_receiver, l1_batch_env.number - 1) + .await + .context("failed creating VM storage")? + .ok_or(Error::Canceled)?; + Ok(self + .batch_executor + .init_batch(storage, l1_batch_env, system_env)) + } + /// This function is meant to be called only once during the state-keeper initialization. /// It will check if we should load a protocol upgrade or a `setChainId` transaction, /// perform some checks and return it. @@ -418,7 +388,7 @@ impl ZkSyncStateKeeper { async fn start_next_l2_block( params: L2BlockParams, updates_manager: &mut UpdatesManager, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, ) -> anyhow::Result<()> { updates_manager.push_l2_block(params); let block_env = updates_manager.l2_block.get_env(); @@ -460,7 +430,7 @@ impl ZkSyncStateKeeper { )] async fn restore_state( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, l2_blocks_to_reexecute: Vec, ) -> Result<(), Error> { @@ -491,6 +461,7 @@ impl ZkSyncStateKeeper { .execute_tx(tx.clone()) .await .with_context(|| format!("failed re-executing transaction {:?}", tx.hash()))?; + let result = TxExecutionResult::new(result, &tx); let TxExecutionResult::Success { tx_result, @@ -564,7 +535,7 @@ impl ZkSyncStateKeeper { )] async fn process_l1_batch( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, protocol_upgrade_tx: Option, ) -> Result<(), Error> { @@ -692,7 +663,7 @@ impl ZkSyncStateKeeper { async fn process_upgrade_tx( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, protocol_upgrade_tx: ProtocolUpgradeTx, ) -> anyhow::Result<()> { @@ -759,7 +730,7 @@ impl ZkSyncStateKeeper { #[tracing::instrument(skip_all)] async fn process_one_tx( &mut self, - batch_executor: &mut BatchExecutorHandle, + batch_executor: &mut dyn BatchExecutor, updates_manager: &mut UpdatesManager, tx: Transaction, ) -> anyhow::Result<(SealResolution, TxExecutionResult)> { @@ -768,6 +739,7 @@ impl ZkSyncStateKeeper { .execute_tx(tx.clone()) .await .with_context(|| format!("failed 
executing transaction {:?}", tx.hash()))?; + let exec_result = TxExecutionResult::new(exec_result, &tx); latency.observe(); let latency = KEEPER_METRICS.determine_seal_resolution.start(); diff --git a/core/node/state_keeper/src/lib.rs b/core/node/state_keeper/src/lib.rs index 1c12f7825486..c12e4163fdd4 100644 --- a/core/node/state_keeper/src/lib.rs +++ b/core/node/state_keeper/src/lib.rs @@ -1,18 +1,4 @@ -use std::sync::Arc; - -use tokio::sync::watch; -use zksync_config::configs::{ - chain::{MempoolConfig, StateKeeperConfig}, - wallets, -}; -use zksync_dal::{ConnectionPool, Core}; -use zksync_node_fee_model::BatchFeeModelInputProvider; -use zksync_types::L2ChainId; - pub use self::{ - batch_executor::{ - main_executor::MainBatchExecutor, BatchExecutor, BatchExecutorHandle, TxExecutionResult, - }, io::{ mempool::MempoolIO, L2BlockParams, L2BlockSealerTask, OutputHandler, StateKeeperIO, StateKeeperOutputHandler, StateKeeperPersistence, TreeWritesPersistence, @@ -25,7 +11,7 @@ pub use self::{ updates::UpdatesManager, }; -mod batch_executor; +pub mod executor; pub mod io; mod keeper; mod mempool_actor; @@ -38,41 +24,3 @@ pub(crate) mod tests; pub(crate) mod types; pub mod updates; pub(crate) mod utils; - -#[allow(clippy::too_many_arguments)] -pub async fn create_state_keeper( - state_keeper_config: StateKeeperConfig, - wallets: wallets::StateKeeper, - async_cache: AsyncRocksdbCache, - l2chain_id: L2ChainId, - mempool_config: &MempoolConfig, - pool: ConnectionPool, - mempool: MempoolGuard, - batch_fee_input_provider: Arc, - output_handler: OutputHandler, - stop_receiver: watch::Receiver, -) -> ZkSyncStateKeeper { - let batch_executor_base = MainBatchExecutor::new(state_keeper_config.save_call_traces, false); - - let io = MempoolIO::new( - mempool, - batch_fee_input_provider, - pool, - &state_keeper_config, - wallets.fee_account.address(), - mempool_config.delay_interval(), - l2chain_id, - ) - .expect("Failed initializing main node I/O for state keeper"); - - let sealer = SequencerSealer::new(state_keeper_config); - - ZkSyncStateKeeper::new( - stop_receiver, - Box::new(io), - Box::new(batch_executor_base), - output_handler, - Arc::new(sealer), - Arc::new(async_cache), - ) -} diff --git a/core/node/state_keeper/src/metrics.rs b/core/node/state_keeper/src/metrics.rs index 1bf314d1b91e..7da5babd2199 100644 --- a/core/node/state_keeper/src/metrics.rs +++ b/core/node/state_keeper/src/metrics.rs @@ -10,10 +10,7 @@ use vise::{ Metrics, }; use zksync_mempool::MempoolStore; -use zksync_multivm::interface::{ - DeduplicatedWritesMetrics, VmExecutionResultAndLogs, VmRevertReason, -}; -use zksync_shared_metrics::InteractionType; +use zksync_multivm::interface::{DeduplicatedWritesMetrics, VmRevertReason}; use zksync_types::ProtocolVersionId; use super::seal_criteria::SealResolution; @@ -84,13 +81,6 @@ pub struct StateKeeperMetrics { /// The time it takes for transactions to be included in a block. Representative of the time user must wait before their transaction is confirmed. #[metrics(buckets = INCLUSION_DELAY_BUCKETS)] pub transaction_inclusion_delay: Family>, - /// Time spent by the state keeper on transaction execution. - #[metrics(buckets = Buckets::LATENCIES)] - pub tx_execution_time: Family>, - /// Number of times gas price was reported as too high. - pub gas_price_too_high: Counter, - /// Number of times blob base fee was reported as too high. - pub blob_base_fee_too_high: Counter, /// The time it takes to match seal resolution for each tx. 
#[metrics(buckets = Buckets::LATENCIES)] pub match_seal_resolution: Histogram, @@ -439,52 +429,9 @@ impl SealProgress<'_> { } } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "command", rename_all = "snake_case")] -pub(super) enum ExecutorCommand { - ExecuteTx, - #[metrics(name = "start_next_miniblock")] - StartNextL2Block, - RollbackLastTx, - FinishBatch, - FinishBatchWithCache, -} - -const GAS_PER_NANOSECOND_BUCKETS: Buckets = Buckets::values(&[ - 0.01, 0.03, 0.1, 0.3, 0.5, 0.75, 1., 1.5, 3., 5., 10., 20., 50., -]); - -/// Executor-related state keeper metrics. -#[derive(Debug, Metrics)] -#[metrics(prefix = "state_keeper")] -pub(super) struct ExecutorMetrics { - /// Latency to process a single command sent to the batch executor. - #[metrics(buckets = Buckets::LATENCIES)] - pub batch_executor_command_response_time: Family>, - /// Cumulative latency of interacting with the storage when executing a transaction - /// in the batch executor. - #[metrics(buckets = Buckets::LATENCIES)] - pub batch_storage_interaction_duration: Family>, - #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] - pub computational_gas_per_nanosecond: Histogram, - #[metrics(buckets = GAS_PER_NANOSECOND_BUCKETS)] - pub failed_tx_gas_limit_per_nanosecond: Histogram, -} - -#[vise::register] -pub(super) static EXECUTOR_METRICS: vise::Global = vise::Global::new(); - #[derive(Debug, Metrics)] #[metrics(prefix = "batch_tip")] pub(crate) struct BatchTipMetrics { - #[metrics(buckets = Buckets::exponential(60000.0..=80000000.0, 2.0))] - gas_used: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] - pubdata_published: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] - circuit_statistic: Histogram, - #[metrics(buckets = Buckets::exponential(1.0..=4096.0, 2.0))] - execution_metrics_size: Histogram, #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] block_writes_metrics_positive_size: Histogram, #[metrics(buckets = Buckets::exponential(1.0..=60000.0, 2.0))] @@ -492,17 +439,6 @@ pub(crate) struct BatchTipMetrics { } impl BatchTipMetrics { - pub fn observe(&self, execution_result: &VmExecutionResultAndLogs) { - self.gas_used - .observe(execution_result.statistics.gas_used as usize); - self.pubdata_published - .observe(execution_result.statistics.pubdata_published as usize); - self.circuit_statistic - .observe(execution_result.statistics.circuit_statistic.total()); - self.execution_metrics_size - .observe(execution_result.get_execution_metrics(None).size()); - } - pub fn observe_writes_metrics( &self, initial_writes_metrics: &DeduplicatedWritesMetrics, diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index d17261a3a0f7..0ce8c06be0e7 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -1,14 +1,17 @@ //! Test utilities that can be used for testing sequencer that may //! be useful outside of this crate. 
+use async_trait::async_trait; use once_cell::sync::Lazy; -use tokio::sync::mpsc; use zksync_contracts::BaseSystemContracts; use zksync_dal::{ConnectionPool, Core, CoreDal as _}; use zksync_multivm::interface::{ - storage::StorageViewCache, CurrentExecutionState, ExecutionResult, FinishedL1Batch, L1BatchEnv, - Refunds, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, + executor::{BatchExecutor, BatchExecutorFactory}, + storage::{InMemoryStorage, StorageView}, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionResultAndLogs, }; +use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ fee::Fee, utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, @@ -17,94 +20,62 @@ use zksync_types::{ }; use zksync_utils::u256_to_h256; -use crate::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, - types::ExecutionMetricsForCriteria, -}; - pub mod test_batch_executor; pub(super) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); -pub(super) fn default_vm_batch_result() -> FinishedL1Batch { - FinishedL1Batch { - block_tip_execution_result: VmExecutionResultAndLogs { - result: ExecutionResult::Success { output: vec![] }, - logs: VmExecutionLogs::default(), - statistics: VmExecutionStatistics::default(), - refunds: Refunds::default(), - }, - final_execution_state: CurrentExecutionState { - events: vec![], - deduplicated_storage_logs: vec![], - used_contract_hashes: vec![], - user_l2_to_l1_logs: vec![], - system_logs: vec![], - storage_refunds: Vec::new(), - pubdata_costs: Vec::new(), - }, - final_bootloader_memory: Some(vec![]), - pubdata_input: Some(vec![]), - state_diffs: Some(vec![]), - } -} - /// Creates a `TxExecutionResult` object denoting a successful tx execution. -pub(crate) fn successful_exec() -> TxExecutionResult { - TxExecutionResult::Success { +pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), }), - tx_metrics: Box::new(ExecutionMetricsForCriteria { - l1_gas: Default::default(), - execution_metrics: Default::default(), - }), compressed_bytecodes: vec![], - call_tracer_result: vec![], - gas_remaining: Default::default(), + call_traces: vec![], } } -pub(crate) fn storage_view_cache() -> StorageViewCache { - StorageViewCache::default() -} - /// `BatchExecutor` which doesn't check anything at all. Accepts all transactions. #[derive(Debug)] pub struct MockBatchExecutor; -impl BatchExecutor<()> for MockBatchExecutor { +impl BatchExecutorFactory for MockBatchExecutor { fn init_batch( &mut self, - _storage: (), - _l1batch_params: L1BatchEnv, + _storage: OwnedStorage, + _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, - ) -> BatchExecutorHandle { - let (send, recv) = mpsc::channel(1); - let handle = tokio::task::spawn(async { - let mut recv = recv; - while let Some(cmd) = recv.recv().await { - match cmd { - Command::ExecuteTx(_, resp) => resp.send(successful_exec()).unwrap(), - Command::StartNextL2Block(_, resp) => resp.send(()).unwrap(), - Command::RollbackLastTx(_) => panic!("unexpected rollback"), - Command::FinishBatch(resp) => { - // Blanket result, it doesn't really matter. 
- resp.send(default_vm_batch_result()).unwrap(); - break; - } - Command::FinishBatchWithCache(resp) => resp - .send((default_vm_batch_result(), storage_view_cache())) - .unwrap(), - } - } - anyhow::Ok(()) - }); - BatchExecutorHandle::from_raw(handle, send) + ) -> Box> { + Box::new(Self) + } +} + +#[async_trait] +impl BatchExecutor for MockBatchExecutor { + async fn execute_tx( + &mut self, + _tx: Transaction, + ) -> anyhow::Result { + Ok(successful_exec()) + } + + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + panic!("unexpected rollback"); + } + + async fn start_next_l2_block(&mut self, _env: L2BlockEnv) -> anyhow::Result<()> { + Ok(()) + } + + async fn finish_batch( + self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { + let storage = OwnedStorage::boxed(InMemoryStorage::default()); + Ok((FinishedL1Batch::mock(), StorageView::new(storage))) } } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index d8ee36990a1c..ffca8dff8643 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -13,27 +13,28 @@ use std::{ }; use async_trait::async_trait; -use tokio::sync::{mpsc, watch}; +use tokio::sync::watch; use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ - interface::{ExecutionResult, L1BatchEnv, SystemEnv, VmExecutionResultAndLogs}, + interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + storage::InMemoryStorage, + BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, + L2BlockEnv, SystemEnv, VmExecutionLogs, VmExecutionResultAndLogs, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_node_test_utils::create_l2_transaction; -use zksync_state::ReadStorageFactory; +use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, - L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, + Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, }; use crate::{ - batch_executor::{BatchExecutor, BatchExecutorHandle, Command, TxExecutionResult}, io::{IoCursor, L1BatchParams, L2BlockParams, PendingBatchData, StateKeeperIO}, seal_criteria::{IoSealCriteria, SequencerSealer, UnexecutableReason}, - testonly::{ - default_vm_batch_result, storage_view_cache, successful_exec, BASE_SYSTEM_CONTRACTS, - }, - types::ExecutionMetricsForCriteria, + testonly::{successful_exec, BASE_SYSTEM_CONTRACTS}, updates::UpdatesManager, OutputHandler, StateKeeperOutputHandler, ZkSyncStateKeeper, }; @@ -110,7 +111,7 @@ impl TestScenario { mut self, description: &'static str, tx: Transaction, - result: TxExecutionResult, + result: BatchTransactionExecutionResult, ) -> Self { self.actions .push_back(ScenarioItem::Tx(description, tx, result)); @@ -198,13 +199,13 @@ impl TestScenario { pub(crate) async fn run(self, sealer: SequencerSealer) { assert!(!self.actions.is_empty(), "Test scenario can't be empty"); - let batch_executor_base = TestBatchExecutorBuilder::new(&self); + let batch_executor = TestBatchExecutorBuilder::new(&self); let (stop_sender, stop_receiver) = watch::channel(false); let (io, output_handler) = TestIO::new(stop_sender, self); let state_keeper = ZkSyncStateKeeper::new( stop_receiver, 
Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(sealer), Arc::new(MockReadStorageFactory), @@ -253,27 +254,33 @@ pub(crate) fn random_upgrade_tx(tx_number: u64) -> ProtocolUpgradeTx { } /// Creates a `TxExecutionResult` object denoting a successful tx execution with the given execution metrics. -pub(crate) fn successful_exec_with_metrics( - tx_metrics: ExecutionMetricsForCriteria, -) -> TxExecutionResult { - TxExecutionResult::Success { +pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { tx_result: Box::new(VmExecutionResultAndLogs { result: ExecutionResult::Success { output: vec![] }, - logs: Default::default(), + logs: VmExecutionLogs { + user_l2_to_l1_logs: vec![UserL2ToL1Log::default()], + ..VmExecutionLogs::default() + }, statistics: Default::default(), refunds: Default::default(), }), - tx_metrics: Box::new(tx_metrics), compressed_bytecodes: vec![], - call_tracer_result: vec![], - gas_remaining: Default::default(), + call_traces: vec![], } } /// Creates a `TxExecutionResult` object denoting a tx that was rejected. -pub(crate) fn rejected_exec() -> TxExecutionResult { - TxExecutionResult::RejectedByVm { - reason: zksync_multivm::interface::Halt::InnerTxError, +pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { + BatchTransactionExecutionResult { + tx_result: Box::new(VmExecutionResultAndLogs { + result: ExecutionResult::Halt { reason }, + logs: Default::default(), + statistics: Default::default(), + refunds: Default::default(), + }), + compressed_bytecodes: vec![], + call_traces: vec![], } } @@ -283,7 +290,7 @@ enum ScenarioItem { NoTxsUntilNextAction(&'static str), /// Increments protocol version in IO state. IncrementProtocolVersion(&'static str), - Tx(&'static str, Transaction, TxExecutionResult), + Tx(&'static str, Transaction, BatchTransactionExecutionResult), Rollback(&'static str, Transaction), Reject(&'static str, Transaction, UnexecutableReason), L2BlockSeal( @@ -332,7 +339,7 @@ impl fmt::Debug for ScenarioItem { } } -type ExpectedTransactions = VecDeque>>; +type ExpectedTransactions = VecDeque>>; #[derive(Debug, Default)] pub struct TestBatchExecutorBuilder { @@ -348,7 +355,7 @@ pub struct TestBatchExecutorBuilder { impl TestBatchExecutorBuilder { pub(crate) fn new(scenario: &TestScenario) -> Self { let mut txs = VecDeque::new(); - let mut batch_txs = HashMap::new(); + let mut batch_txs = HashMap::<_, VecDeque>::new(); let mut rollback_set = HashSet::new(); // Insert data about the pending batch, if it exists. 
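Since `TxExecutionResult` is gone from the test harness, canned responses are now plain `BatchTransactionExecutionResult` values. A sketch of the construction pattern shared by the `successful_exec_with_log` and `rejected_exec` helpers above (the `canned_result` wrapper is illustrative, not part of the patch):

use zksync_multivm::interface::{
    BatchTransactionExecutionResult, ExecutionResult, Halt, VmExecutionResultAndLogs,
};

fn canned_result(result: ExecutionResult) -> BatchTransactionExecutionResult {
    BatchTransactionExecutionResult {
        // Everything except the outcome is defaulted; tests only inspect `result`.
        tx_result: Box::new(VmExecutionResultAndLogs {
            result,
            logs: Default::default(),
            statistics: Default::default(),
            refunds: Default::default(),
        }),
        compressed_bytecodes: vec![],
        call_traces: vec![],
    }
}

// E.g., a VM rejection: canned_result(ExecutionResult::Halt { reason: Halt::InnerTxError })
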
@@ -369,9 +376,7 @@ impl TestBatchExecutorBuilder { ScenarioItem::Tx(_, tx, result) => { batch_txs .entry(tx.hash()) - .and_modify(|txs: &mut VecDeque| { - txs.push_back(result.clone()) - }) + .and_modify(|txs| txs.push_back(result.clone())) .or_insert_with(|| { let mut txs = VecDeque::with_capacity(1); txs.push_back(result.clone()); @@ -410,34 +415,24 @@ impl TestBatchExecutorBuilder { } } -impl BatchExecutor<()> for TestBatchExecutorBuilder { +impl BatchExecutorFactory for TestBatchExecutorBuilder { fn init_batch( &mut self, - _storage: (), - _l1_batch_params: L1BatchEnv, + _storage: OwnedStorage, + _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, - ) -> BatchExecutorHandle { - let (commands_sender, commands_receiver) = mpsc::channel(1); - - let executor = TestBatchExecutor::new( - commands_receiver, - self.txs.pop_front().unwrap(), - self.rollback_set.clone(), - ); - let handle = tokio::task::spawn_blocking(move || { - executor.run(); - Ok(()) - }); - BatchExecutorHandle::from_raw(handle, commands_sender) + ) -> Box> { + let executor = + TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); + Box::new(executor) } } #[derive(Debug)] pub(super) struct TestBatchExecutor { - commands: mpsc::Receiver, /// Mapping tx -> response. /// The same transaction can be executed several times, so we use a sequence of responses and consume them by one. - txs: HashMap>, + txs: HashMap>, /// Set of transactions that are expected to be rolled back. rollback_set: HashSet, /// Last executed tx hash. @@ -446,64 +441,63 @@ pub(super) struct TestBatchExecutor { impl TestBatchExecutor { pub(super) fn new( - commands: mpsc::Receiver, - txs: HashMap>, + txs: HashMap>, rollback_set: HashSet, ) -> Self { Self { - commands, txs, rollback_set, last_tx: H256::default(), // We don't expect rollbacks until the first tx is executed. } } +} - pub(super) fn run(mut self) { - while let Some(cmd) = self.commands.blocking_recv() { - match cmd { - Command::ExecuteTx(tx, resp) => { - let result = self - .txs - .get_mut(&tx.hash()) - .unwrap() - .pop_front() - .unwrap_or_else(|| { - panic!( - "Received a request to execute an unknown transaction: {:?}", - tx - ) - }); - resp.send(result).unwrap(); - self.last_tx = tx.hash(); - } - Command::StartNextL2Block(_, resp) => { - resp.send(()).unwrap(); - } - Command::RollbackLastTx(resp) => { - // This is an additional safety check: IO would check that every rollback is included in the - // test scenario, but here we want to additionally check that each such request goes to the - // the batch executor as well. - if !self.rollback_set.contains(&self.last_tx) { - // Request to rollback an unexpected tx. - panic!( - "Received a request to rollback an unexpected tx. Last executed tx: {:?}", - self.last_tx - ) - } - resp.send(()).unwrap(); - // It's OK to not update `last_executed_tx`, since state keeper never should rollback more than 1 - // tx in a row, and it's going to cause a panic anyway. - } - Command::FinishBatch(resp) => { - // Blanket result, it doesn't really matter. 
- resp.send(default_vm_batch_result()).unwrap(); - return; - } - Command::FinishBatchWithCache(resp) => resp - .send((default_vm_batch_result(), storage_view_cache())) - .unwrap(), - } +#[async_trait] +impl BatchExecutor for TestBatchExecutor { + async fn execute_tx( + &mut self, + tx: Transaction, + ) -> anyhow::Result { + let result = self + .txs + .get_mut(&tx.hash()) + .unwrap() + .pop_front() + .unwrap_or_else(|| { + panic!( + "Received a request to execute an unknown transaction: {:?}", + tx + ) + }); + self.last_tx = tx.hash(); + Ok(result) + } + + async fn rollback_last_tx(&mut self) -> anyhow::Result<()> { + // This is an additional safety check: IO would check that every rollback is included in the + // test scenario, but here we want to additionally check that each such request goes to the + // the batch executor as well. + if !self.rollback_set.contains(&self.last_tx) { + // Request to rollback an unexpected tx. + panic!( + "Received a request to rollback an unexpected tx. Last executed tx: {:?}", + self.last_tx + ) } + // It's OK to not update `last_executed_tx`, since state keeper never should rollback more than 1 + // tx in a row, and it's going to cause a panic anyway. + Ok(()) + } + + async fn start_next_l2_block(&mut self, _env: L2BlockEnv) -> anyhow::Result<()> { + Ok(()) + } + + async fn finish_batch( + self: Box, + ) -> anyhow::Result<(FinishedL1Batch, StorageView)> { + let storage = OwnedStorage::boxed(InMemoryStorage::default()); + Ok((FinishedL1Batch::mock(), StorageView::new(storage))) } } @@ -809,12 +803,13 @@ impl StateKeeperIO for TestIO { pub struct MockReadStorageFactory; #[async_trait] -impl ReadStorageFactory<()> for MockReadStorageFactory { +impl ReadStorageFactory for MockReadStorageFactory { async fn access_storage( &self, _stop_receiver: &watch::Receiver, _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - Ok(Some(())) + ) -> anyhow::Result> { + let storage = InMemoryStorage::default(); + Ok(Some(OwnedStorage::boxed(storage))) } } diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index e9a0a57c6977..80de0f0beff9 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -11,7 +11,7 @@ use zksync_config::configs::chain::StateKeeperConfig; use zksync_multivm::{ interface::{ ExecutionResult, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxExecutionMode, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, + VmExecutionLogs, VmExecutionResultAndLogs, VmExecutionStatistics, }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -27,7 +27,6 @@ use zksync_types::{ use zksync_utils::u256_to_h256; use crate::{ - batch_executor::TxExecutionResult, io::PendingBatchData, keeper::POLL_WAIT_DURATION, seal_criteria::{ @@ -37,14 +36,13 @@ use crate::{ testonly::{ successful_exec, test_batch_executor::{ - random_tx, random_upgrade_tx, rejected_exec, successful_exec_with_metrics, + random_tx, random_upgrade_tx, rejected_exec, successful_exec_with_log, MockReadStorageFactory, TestBatchExecutorBuilder, TestIO, TestScenario, FEE_ACCOUNT, }, BASE_SYSTEM_CONTRACTS, }, - types::ExecutionMetricsForCriteria, updates::UpdatesManager, - utils::l1_batch_base_cost, + utils::{gas_count_from_tx_and_metrics, l1_batch_base_cost}, ZkSyncStateKeeper, }; @@ -194,29 +192,28 @@ async fn sealed_by_number_of_txs() { #[tokio::test] async fn sealed_by_gas() { + let first_tx = random_tx(1); + let execution_result = successful_exec_with_log(); + let 
exec_metrics = execution_result + .tx_result + .get_execution_metrics(Some(&first_tx)); + assert!(exec_metrics.size() > 0); + let l1_gas_per_tx = gas_count_from_tx_and_metrics(&first_tx, &exec_metrics); + assert!(l1_gas_per_tx.commit > 0); + let config = StateKeeperConfig { - max_single_tx_gas: 62_002, + max_single_tx_gas: 62_000 + l1_gas_per_tx.commit * 2, reject_tx_at_gas_percentage: 1.0, close_block_at_gas_percentage: 0.5, ..StateKeeperConfig::default() }; let sealer = SequencerSealer::with_sealers(config, vec![Box::new(GasCriterion)]); - let l1_gas_per_tx = BlockGasCount { - commit: 1, // Both txs together with `block_base_cost` would bring it over the block `31_001` commit bound. - prove: 0, - execute: 0, - }; - let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { - l1_gas: l1_gas_per_tx, - execution_metrics: VmExecutionMetrics::default(), - }); - TestScenario::new() .seal_l2_block_when(|updates| { updates.l2_block.executed_transactions.len() == 1 }) - .next_tx("First tx", random_tx(1), execution_result.clone()) + .next_tx("First tx", first_tx, execution_result.clone()) .l2_block_sealed_with("L2 block with a single tx", move |updates| { assert_eq!( updates.l2_block.l1_gas_count, @@ -226,11 +223,11 @@ async fn sealed_by_gas() { }) .next_tx("Second tx", random_tx(1), execution_result) .l2_block_sealed("L2 block 2") - .batch_sealed_with("Batch sealed with both txs", |updates| { + .batch_sealed_with("Batch sealed with both txs", move |updates| { assert_eq!( updates.l1_batch.l1_gas_count, BlockGasCount { - commit: l1_batch_base_cost(AggregatedActionType::Commit) + 2, + commit: l1_batch_base_cost(AggregatedActionType::Commit) + l1_gas_per_tx.commit * 2, prove: l1_batch_base_cost(AggregatedActionType::PublishProofOnchain), execute: l1_batch_base_cost(AggregatedActionType::Execute), }, @@ -254,14 +251,7 @@ async fn sealed_by_gas_then_by_num_tx() { vec![Box::new(GasCriterion), Box::new(SlotsCriterion)], ); - let execution_result = successful_exec_with_metrics(ExecutionMetricsForCriteria { - l1_gas: BlockGasCount { - commit: 1, - prove: 0, - execute: 0, - }, - execution_metrics: VmExecutionMetrics::default(), - }); + let execution_result = successful_exec_with_log(); // 1st tx is sealed by gas sealer; 2nd, 3rd, & 4th are sealed by slots sealer. 
TestScenario::new() @@ -316,7 +306,11 @@ async fn rejected_tx() { let rejected_tx = random_tx(1); TestScenario::new() .seal_l2_block_when(|updates| updates.l2_block.executed_transactions.len() == 1) - .next_tx("Rejected tx", rejected_tx.clone(), rejected_exec()) + .next_tx( + "Rejected tx", + rejected_tx.clone(), + rejected_exec(Halt::InnerTxError), + ) .tx_rejected( "Tx got rejected", rejected_tx, @@ -349,7 +343,7 @@ async fn bootloader_tip_out_of_gas_flow() { .next_tx( "Tx -> Bootloader tip out of gas", bootloader_out_of_gas_tx.clone(), - TxExecutionResult::BootloaderOutOfGasForTx, + rejected_exec(Halt::BootloaderOutOfGas), ) .tx_rollback( "Last tx rolled back to seal the block", @@ -424,7 +418,7 @@ async fn pending_batch_is_applied() { async fn load_upgrade_tx() { let sealer = SequencerSealer::default(); let scenario = TestScenario::new(); - let batch_executor_base = TestBatchExecutorBuilder::new(&scenario); + let batch_executor = TestBatchExecutorBuilder::new(&scenario); let (stop_sender, stop_receiver) = watch::channel(false); let (mut io, output_handler) = TestIO::new(stop_sender, scenario); @@ -434,7 +428,7 @@ async fn load_upgrade_tx() { let mut sk = ZkSyncStateKeeper::new( stop_receiver, Box::new(io), - Box::new(batch_executor_base), + Box::new(batch_executor), output_handler, Arc::new(sealer), Arc::new(MockReadStorageFactory), diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml index c975bbcd280a..7a5a4de5d0c9 100644 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ b/core/node/tee_verifier_input_producer/Cargo.toml @@ -18,7 +18,7 @@ zksync_queued_job_processor.workspace = true zksync_tee_verifier.workspace = true zksync_types.workspace = true zksync_utils.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index abd70542a42f..08382903ad6d 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -21,7 +21,7 @@ use zksync_queued_job_processor::JobProcessor; use zksync_tee_verifier::Verify; use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; use zksync_utils::u256_to_h256; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; use self::metrics::METRICS; diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index 565b33c0c347..ceb11a982477 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -11,17 +11,16 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true +zksync_vm_interface.workspace = true zksync_types.workspace = true zksync_dal.workspace = true zksync_contracts.workspace = true zksync_state.workspace = true zksync_storage.workspace = true -zksync_state_keeper.workspace = true zksync_utils.workspace = true zksync_prover_interface.workspace = true zksync_object_store.workspace = true -zksync_vm_utils.workspace = true +zksync_vm_executor.workspace = true zksync_health_check.workspace = true serde.workspace = true diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f7f8c099609f..f23f63533ff5 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -6,16 +6,18 @@ use tokio::sync::watch; use 
zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_object_store::ObjectStore; use zksync_prover_interface::inputs::VMRunWitnessInputData; -use zksync_state_keeper::{BatchExecutor, StateKeeperOutputHandler, UpdatesManager}; +use zksync_state::OwnedStorage; use zksync_types::{ block::StorageOracleInfo, witness_block_state::WitnessStorageState, L1BatchNumber, L2ChainId, H256, }; use zksync_utils::{bytes_to_chunks, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{executor::BatchExecutorFactory, L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, - OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, + L1BatchOutput, L2BlockOutput, OutputHandler, OutputHandlerFactory, VmRunner, VmRunnerIo, + VmRunnerStorage, }; /// A standalone component that retrieves all needed data for basic witness generation and saves it to the bucket @@ -30,7 +32,7 @@ impl BasicWitnessInputProducer { pub async fn new( pool: ConnectionPool, object_store: Arc, - batch_executor: Box, + batch_executor_factory: Box>, rocksdb_path: String, chain_id: L2ChainId, first_processed_batch: L1BatchNumber, @@ -53,7 +55,7 @@ impl BasicWitnessInputProducer { Box::new(io), Arc::new(loader), Box::new(output_handler_factory), - batch_executor, + batch_executor_factory, ); Ok(( Self { vm_runner }, @@ -145,30 +147,38 @@ impl VmRunnerIo for BasicWitnessInputProducerIo { struct BasicWitnessInputProducerOutputHandler { pool: ConnectionPool, object_store: Arc, + system_env: SystemEnv, + l1_batch_number: L1BatchNumber, } #[async_trait] -impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { - async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for BasicWitnessInputProducerOutputHandler { + async fn handle_l2_block( + &mut self, + _env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { Ok(()) } #[tracing::instrument( name = "BasicWitnessInputProducerOutputHandler::handle_l1_batch", skip_all, - fields(l1_batch = %updates_manager.l1_batch.number) + fields(l1_batch = %self.l1_batch_number) )] - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { - let l1_batch_number = updates_manager.l1_batch.number; + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let l1_batch_number = self.l1_batch_number; let mut connection = self.pool.connection().await?; tracing::info!(%l1_batch_number, "Started saving VM run data"); - let result = - get_updates_manager_witness_input_data(&mut connection, updates_manager).await?; + let result = get_updates_manager_witness_input_data( + &mut connection, + &self.system_env, + l1_batch_number, + &output, + ) + .await?; assert_database_witness_input_data(&mut connection, l1_batch_number, &result).await; @@ -193,18 +203,13 @@ impl StateKeeperOutputHandler for BasicWitnessInputProducerOutputHandler { #[tracing::instrument(skip_all)] async fn get_updates_manager_witness_input_data( connection: &mut Connection<'_, Core>, - updates_manager: Arc, + system_env: &SystemEnv, + l1_batch_number: L1BatchNumber, + output: &L1BatchOutput, ) -> anyhow::Result { - let l1_batch_number = updates_manager.l1_batch.number; - let finished_batch = updates_manager - .l1_batch - .finished - .clone() - .ok_or_else(|| anyhow!("L1 batch {l1_batch_number:?} is not finished"))?; - - let initial_heap_content = finished_batch.final_bootloader_memory.unwrap(); // might be just 
empty - let default_aa = updates_manager.base_system_contract_hashes().default_aa; - let bootloader = updates_manager.base_system_contract_hashes().bootloader; + let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty + let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() .get_sealed_factory_dep(bootloader) @@ -220,9 +225,8 @@ async fn get_updates_manager_witness_input_data( .ok_or_else(|| anyhow!("Default account bytecode should exist"))?; let account_bytecode = bytes_to_chunks(&account_bytecode_bytes); - let hashes: HashSet = finished_batch - .final_execution_state - .used_contract_hashes + let used_contract_hashes = &output.batch.final_execution_state.used_contract_hashes; + let hashes: HashSet = used_contract_hashes .iter() // SMA-1555: remove this hack once updated to the latest version of `zkevm_test_harness` .filter(|&&hash| hash != h256_to_u256(bootloader)) @@ -232,33 +236,22 @@ async fn get_updates_manager_witness_input_data( .factory_deps_dal() .get_factory_deps(&hashes) .await; - if finished_batch - .final_execution_state - .used_contract_hashes - .contains(&account_code_hash) - { + if used_contract_hashes.contains(&account_code_hash) { used_bytecodes.insert(account_code_hash, account_bytecode); } - let storage_refunds = finished_batch.final_execution_state.storage_refunds; - let pubdata_costs = finished_batch.final_execution_state.pubdata_costs; - - let storage_view_cache = updates_manager - .storage_view_cache() - .expect("Storage view cache was not initialized"); - + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); + let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { - read_storage_key: storage_view_cache.read_storage_keys(), - is_write_initial: storage_view_cache.initial_writes(), + read_storage_key: output.storage_view_cache.read_storage_keys(), + is_write_initial: output.storage_view_cache.initial_writes(), }; Ok(VMRunWitnessInputData { l1_batch_number, used_bytecodes, initial_heap_content, - - protocol_version: updates_manager.protocol_version(), - + protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, storage_refunds, @@ -389,11 +382,14 @@ struct BasicWitnessInputProducerOutputHandlerFactory { impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(BasicWitnessInputProducerOutputHandler { pool: self.pool.clone(), object_store: self.object_store.clone(), + system_env, + l1_batch_number: l1_batch_env.number, })) } } diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index 461d36116096..091fa15fc953 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -15,13 +15,15 @@ use tokio::{ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; use zksync_state::RocksdbStorage; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{vm::FastVmMode, L1BatchNumber, 
L2ChainId}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ storage::{PostgresLoader, StorageLoader}, - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, - StorageSyncTask, VmRunner, VmRunnerIo, VmRunnerStorage, + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, L1BatchOutput, + L2BlockOutput, OutputHandler, OutputHandlerFactory, StorageSyncTask, VmRunner, VmRunnerIo, + VmRunnerStorage, }; #[derive(Debug, Serialize)] @@ -80,7 +82,7 @@ enum VmPlaygroundStorage { #[derive(Debug)] pub struct VmPlayground { pool: ConnectionPool, - batch_executor: MainBatchExecutor, + batch_executor_factory: MainBatchExecutorFactory, storage: VmPlaygroundStorage, chain_id: L2ChainId, io: VmPlaygroundIo, @@ -125,8 +127,8 @@ impl VmPlayground { latest_processed_batch.unwrap_or(cursor.first_processed_batch) }; - let mut batch_executor = MainBatchExecutor::new(false, false); - batch_executor.set_fast_vm_mode(vm_mode); + let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false); + batch_executor_factory.set_fast_vm_mode(vm_mode); let io = VmPlaygroundIo { cursor_file_path, @@ -157,7 +159,7 @@ impl VmPlayground { }; let this = Self { pool, - batch_executor, + batch_executor_factory, storage, chain_id, io, @@ -247,7 +249,7 @@ impl VmPlayground { Box::new(self.io), loader, Box::new(self.output_handler_factory), - Box::new(self.batch_executor), + Box::new(self.batch_executor_factory), ); vm_runner.run(&stop_receiver).await } @@ -392,9 +394,17 @@ impl VmRunnerIo for VmPlaygroundIo { struct VmPlaygroundOutputHandler; #[async_trait] -impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - tracing::trace!("Processed L2 block #{}", updates_manager.l2_block.number); +impl OutputHandler for VmPlaygroundOutputHandler { + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { + tracing::trace!("Processed L2 block #{}", env.number); + Ok(()) + } + + async fn handle_l1_batch(self: Box, _output: Arc) -> anyhow::Result<()> { Ok(()) } } @@ -403,8 +413,9 @@ impl StateKeeperOutputHandler for VmPlaygroundOutputHandler { impl OutputHandlerFactory for VmPlaygroundOutputHandler { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + _l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(Self)) } } diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index dfd5251fd39b..b620675b78e2 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -1,15 +1,16 @@ use std::sync::Arc; -use anyhow::Context; use async_trait::async_trait; use tokio::sync::watch; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_state_keeper::{MainBatchExecutor, StateKeeperOutputHandler, UpdatesManager}; use zksync_types::{L1BatchNumber, L2ChainId, StorageLog}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; use crate::{ storage::StorageSyncTask, ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, - OutputHandlerFactory, VmRunner, VmRunnerIo, VmRunnerStorage, + L1BatchOutput, L2BlockOutput, OutputHandler, OutputHandlerFactory, VmRunner, 
VmRunnerIo, + VmRunnerStorage, }; /// A standalone component that writes protective reads asynchronously to state keeper. @@ -37,7 +38,7 @@ impl ProtectiveReadsWriter { let output_handler_factory = ProtectiveReadsOutputHandlerFactory { pool: pool.clone() }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); - let batch_processor = MainBatchExecutor::new(false, false); + let batch_processor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io), @@ -133,30 +134,29 @@ impl VmRunnerIo for ProtectiveReadsIo { #[derive(Debug)] struct ProtectiveReadsOutputHandler { + l1_batch_number: L1BatchNumber, pool: ConnectionPool, } #[async_trait] -impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { - async fn handle_l2_block(&mut self, _updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for ProtectiveReadsOutputHandler { + async fn handle_l2_block( + &mut self, + _env: L2BlockEnv, + _output: &L2BlockOutput, + ) -> anyhow::Result<()> { Ok(()) } #[tracing::instrument( name = "ProtectiveReadsOutputHandler::handle_l1_batch", skip_all, - fields(l1_batch = %updates_manager.l1_batch.number) + fields(l1_batch = %self.l1_batch_number) )] - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { - let finished_batch = updates_manager - .l1_batch - .finished - .as_ref() - .context("L1 batch is not actually finished")?; - let (_, computed_protective_reads): (Vec, Vec) = finished_batch + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let l1_batch_number = self.l1_batch_number; + let (_, computed_protective_reads): (Vec, Vec) = output + .batch .final_execution_state .deduplicated_storage_logs .iter() @@ -168,12 +168,12 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { .await?; let mut written_protective_reads = connection .storage_logs_dedup_dal() - .get_protective_reads_for_l1_batch(updates_manager.l1_batch.number) + .get_protective_reads_for_l1_batch(l1_batch_number) .await?; if !written_protective_reads.is_empty() { tracing::debug!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, "Protective reads have already been written, validating" ); for protective_read in computed_protective_reads { @@ -181,7 +181,7 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { let key = protective_read.key.key(); if !written_protective_reads.remove(&protective_read.key) { tracing::error!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, address = %address, key = %key, "VM runner produced a protective read that did not happen in state keeper" @@ -190,7 +190,7 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { } for remaining_read in written_protective_reads { tracing::error!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, address = %remaining_read.address(), key = %remaining_read.key(), "State keeper produced a protective read that did not happen in VM runner" @@ -198,15 +198,12 @@ impl StateKeeperOutputHandler for ProtectiveReadsOutputHandler { } } else { tracing::debug!( - l1_batch_number = %updates_manager.l1_batch.number, + l1_batch_number = %l1_batch_number, "Protective reads have not been written, writing" ); connection .storage_logs_dedup_dal() - .insert_protective_reads( - updates_manager.l1_batch.number, - 
&computed_protective_reads, - ) + .insert_protective_reads(l1_batch_number, &computed_protective_reads) .await?; } @@ -223,10 +220,12 @@ struct ProtectiveReadsOutputHandlerFactory { impl OutputHandlerFactory for ProtectiveReadsOutputHandlerFactory { async fn create_handler( &mut self, - _l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { Ok(Box::new(ProtectiveReadsOutputHandler { pool: self.pool.clone(), + l1_batch_number: l1_batch_env.number, })) } } diff --git a/core/node/vm_runner/src/lib.rs b/core/node/vm_runner/src/lib.rs index 03e3f43baedc..63e2b5881aaf 100644 --- a/core/node/vm_runner/src/lib.rs +++ b/core/node/vm_runner/src/lib.rs @@ -16,7 +16,8 @@ mod tests; pub use self::{ io::VmRunnerIo, output_handler::{ - ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, OutputHandlerFactory, + ConcurrentOutputHandlerFactory, ConcurrentOutputHandlerFactoryTask, L1BatchOutput, + L2BlockOutput, OutputHandler, OutputHandlerFactory, }, process::VmRunner, storage::{BatchExecuteData, StorageSyncTask, VmRunnerStorage}, diff --git a/core/node/vm_runner/src/output_handler.rs b/core/node/vm_runner/src/output_handler.rs index 4052c245a44f..25eae5e36845 100644 --- a/core/node/vm_runner/src/output_handler.rs +++ b/core/node/vm_runner/src/output_handler.rs @@ -1,9 +1,4 @@ -use std::{ - fmt::{Debug, Formatter}, - mem, - sync::Arc, - time::Duration, -}; +use std::{fmt, sync::Arc, time::Duration}; use anyhow::Context; use async_trait::async_trait; @@ -13,13 +8,52 @@ use tokio::{ task::JoinHandle, }; use zksync_dal::{ConnectionPool, Core}; -use zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; -use zksync_types::L1BatchNumber; +use zksync_state::interface::StorageViewCache; +use zksync_types::{L1BatchNumber, Transaction}; +use zksync_vm_interface::{ + BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, +}; use crate::{metrics::METRICS, VmRunnerIo}; type BatchReceiver = oneshot::Receiver>>; +/// Output from executing a single L2 block. +#[derive(Debug, Default)] +pub struct L2BlockOutput { + /// Executed transactions together with execution results. + pub transactions: Vec<(Transaction, BatchTransactionExecutionResult)>, +} + +impl L2BlockOutput { + pub(crate) fn push(&mut self, tx: Transaction, exec_result: BatchTransactionExecutionResult) { + self.transactions.push((tx, exec_result)); + } +} + +/// Output from executing L1 batch tip. +#[derive(Debug)] +pub struct L1BatchOutput { + /// Finished L1 batch. + pub batch: FinishedL1Batch, + /// Information about storage accesses for the batch. + pub storage_view_cache: StorageViewCache, +} + +/// Handler of batch execution. +#[async_trait] +pub trait OutputHandler: fmt::Debug + Send { + /// Handles an L2 block processed by the VM. + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + output: &L2BlockOutput, + ) -> anyhow::Result<()>; + + /// Handles an L1 batch processed by the VM. + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()>; +} + /// Functionality to produce a [`StateKeeperOutputHandler`] implementation for a specific L1 batch. /// /// The idea behind this trait is that often handling output data is independent of the order of the @@ -27,7 +61,7 @@ type BatchReceiver = oneshot::Receiver>>; /// simultaneously. Implementing this trait signifies that this property is held for the data the /// implementation is responsible for. 
#[async_trait] -pub trait OutputHandlerFactory: Debug + Send { +pub trait OutputHandlerFactory: fmt::Debug + Send { /// Creates a [`StateKeeperOutputHandler`] implementation for the provided L1 batch. Only /// supposed to be used for the L1 batch data it was created against. Using it for anything else /// will lead to errors. @@ -37,8 +71,9 @@ pub trait OutputHandlerFactory: Debug + Send { /// Propagates DB errors. async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result>; } /// A delegator factory that requires an underlying factory `F` that does the actual work, however @@ -57,8 +92,12 @@ pub struct ConcurrentOutputHandlerFactory Debug for ConcurrentOutputHandlerFactory { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for ConcurrentOutputHandlerFactory +where + Io: VmRunnerIo, + F: OutputHandlerFactory, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ConcurrentOutputHandlerFactory") .field("pool", &self.pool) .field("io", &self.io) @@ -101,8 +140,10 @@ impl OutputHandlerFactory { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { + let l1_batch_number = l1_batch_env.number; let mut conn = self.pool.connection_tagged(self.io.name()).await?; let latest_processed_batch = self.io.latest_processed_batch(&mut conn).await?; let last_processable_batch = self.io.last_ready_to_be_loaded_batch(&mut conn).await?; @@ -121,70 +162,50 @@ impl OutputHandlerFactory last_processable_batch ); - let handler = self.factory.create_handler(l1_batch_number).await?; + let handler = self + .factory + .create_handler(system_env, l1_batch_env) + .await?; let (sender, receiver) = oneshot::channel(); self.state.insert(l1_batch_number, receiver); - Ok(Box::new(AsyncOutputHandler::Running { handler, sender })) + Ok(Box::new(AsyncOutputHandler { handler, sender })) } } -enum AsyncOutputHandler { - Running { - handler: Box, - sender: oneshot::Sender>>, - }, - Finished, +struct AsyncOutputHandler { + handler: Box, + sender: oneshot::Sender>>, } -impl Debug for AsyncOutputHandler { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { - match self { - AsyncOutputHandler::Running { handler, .. } => f - .debug_struct("AsyncOutputHandler::Running") - .field("handler", handler) - .finish(), - AsyncOutputHandler::Finished => f.debug_struct("AsyncOutputHandler::Finished").finish(), - } +impl fmt::Debug for AsyncOutputHandler { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("AsyncOutputHandler::Running") + .field("handler", &self.handler) + .finish() } } #[async_trait] -impl StateKeeperOutputHandler for AsyncOutputHandler { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { - match self { - AsyncOutputHandler::Running { handler, .. 
} => { - handler.handle_l2_block(updates_manager).await - } - AsyncOutputHandler::Finished => { - Err(anyhow::anyhow!("Cannot handle any more L2 blocks")) - } - } - } - - async fn handle_l1_batch( +impl OutputHandler for AsyncOutputHandler { + async fn handle_l2_block( &mut self, - updates_manager: Arc, + env: L2BlockEnv, + output: &L2BlockOutput, ) -> anyhow::Result<()> { - let state = mem::replace(self, AsyncOutputHandler::Finished); - match state { - AsyncOutputHandler::Running { - mut handler, - sender, - } => { - sender - .send(tokio::task::spawn(async move { - let latency = METRICS.output_handle_time.start(); - let result = handler.handle_l1_batch(updates_manager).await; - latency.observe(); - result - })) - .ok(); - Ok(()) - } - AsyncOutputHandler::Finished => { - Err(anyhow::anyhow!("Cannot handle any more L1 batches")) - } - } + self.handler.handle_l2_block(env, output).await + } + + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { + let handler = self.handler; + self.sender + .send(tokio::task::spawn(async move { + let latency = METRICS.output_handle_time.start(); + let result = handler.handle_l1_batch(output).await; + latency.observe(); + result + })) + .ok(); + Ok(()) } } @@ -196,8 +217,8 @@ pub struct ConcurrentOutputHandlerFactoryTask { state: Arc>, } -impl Debug for ConcurrentOutputHandlerFactoryTask { - fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { +impl fmt::Debug for ConcurrentOutputHandlerFactoryTask { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ConcurrentOutputHandlerFactoryTask") .field("pool", &self.pool) .field("io", &self.io) diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 3c5a00e074c0..e2a678ccdce4 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -3,14 +3,17 @@ use std::{sync::Arc, time::Duration}; use anyhow::Context; use tokio::{sync::watch, task::JoinHandle}; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::L2BlockEnv; -use zksync_state_keeper::{ - BatchExecutor, BatchExecutorHandle, ExecutionMetricsForCriteria, L2BlockParams, - StateKeeperOutputHandler, TxExecutionResult, UpdatesManager, -}; +use zksync_state::OwnedStorage; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber}; +use zksync_vm_interface::{ + executor::{BatchExecutor, BatchExecutorFactory}, + L2BlockEnv, +}; -use crate::{metrics::METRICS, storage::StorageLoader, OutputHandlerFactory, VmRunnerIo}; +use crate::{ + metrics::METRICS, output_handler::OutputHandler, storage::StorageLoader, L1BatchOutput, + L2BlockOutput, OutputHandlerFactory, VmRunnerIo, +}; /// VM runner represents a logic layer of L1 batch / L2 block processing flow akin to that of state /// keeper. 
The difference is that VM runner is designed to be run on batches/blocks that have @@ -29,7 +32,7 @@ pub struct VmRunner { io: Box, loader: Arc, output_handler_factory: Box, - batch_processor: Box, + batch_executor_factory: Box>, } impl VmRunner { @@ -44,32 +47,27 @@ impl VmRunner { io: Box, loader: Arc, output_handler_factory: Box, - batch_processor: Box, + batch_executor_factory: Box>, ) -> Self { Self { pool, io, loader, output_handler_factory, - batch_processor, + batch_executor_factory, } } async fn process_batch( - mut batch_executor: BatchExecutorHandle, + mut batch_executor: Box>, l2_blocks: Vec, - mut updates_manager: UpdatesManager, - mut output_handler: Box, + mut output_handler: Box, ) -> anyhow::Result<()> { let latency = METRICS.run_vm_time.start(); for (i, l2_block) in l2_blocks.into_iter().enumerate() { + let block_env = L2BlockEnv::from_l2_block_data(&l2_block); if i > 0 { // First L2 block in every batch is already preloaded - updates_manager.push_l2_block(L2BlockParams { - timestamp: l2_block.timestamp, - virtual_blocks: l2_block.virtual_blocks, - }); - let block_env = L2BlockEnv::from_l2_block_data(&l2_block); batch_executor .start_next_l2_block(block_env) .await @@ -77,51 +75,36 @@ impl VmRunner { format!("failed starting L2 block with {block_env:?} in batch executor") })?; } + + let mut block_output = L2BlockOutput::default(); for tx in l2_block.txs { let exec_result = batch_executor .execute_tx(tx.clone()) .await .with_context(|| format!("failed executing transaction {:?}", tx.hash()))?; - let TxExecutionResult::Success { - tx_result, - tx_metrics, - call_tracer_result, - compressed_bytecodes, - .. - } = exec_result - else { - anyhow::bail!("Unexpected non-successful transaction"); - }; - let ExecutionMetricsForCriteria { - l1_gas: tx_l1_gas_this_tx, - execution_metrics: tx_execution_metrics, - } = *tx_metrics; - updates_manager.extend_from_executed_transaction( - tx, - *tx_result, - compressed_bytecodes, - tx_l1_gas_this_tx, - tx_execution_metrics, - call_tracer_result, + anyhow::ensure!( + !exec_result.was_halted(), + "Unexpected non-successful transaction" ); + block_output.push(tx, exec_result); } output_handler - .handle_l2_block(&updates_manager) + .handle_l2_block(block_env, &block_output) .await .context("VM runner failed to handle L2 block")?; } - let (finished_batch, storage_view_cache) = batch_executor - .finish_batch_with_cache() + let (batch, storage_view) = batch_executor + .finish_batch() .await - .context("Failed getting storage view cache")?; - updates_manager.finish_batch(finished_batch); - // this is needed for Basic Witness Input Producer to use in memory reads, but not database queries - updates_manager.update_storage_view_cache(storage_view_cache); - + .context("VM runner failed to execute batch tip")?; + let output = L1BatchOutput { + batch, + storage_view_cache: storage_view.cache(), + }; latency.observe(); output_handler - .handle_l1_batch(Arc::new(updates_manager)) + .handle_l1_batch(Arc::new(output)) .await .context("VM runner failed to handle L1 batch")?; Ok(()) @@ -178,16 +161,14 @@ impl VmRunner { tokio::time::sleep(SLEEP_INTERVAL).await; continue; }; - let updates_manager = - UpdatesManager::new(&batch_data.l1_batch_env, &batch_data.system_env); - let batch_executor = self.batch_processor.init_batch( + let batch_executor = self.batch_executor_factory.init_batch( storage, - batch_data.l1_batch_env, - batch_data.system_env, + batch_data.l1_batch_env.clone(), + batch_data.system_env.clone(), ); let output_handler = self 
.output_handler_factory - .create_handler(next_batch) + .create_handler(batch_data.system_env, batch_data.l1_batch_env) .await?; self.io @@ -196,7 +177,6 @@ impl VmRunner { let handle = tokio::task::spawn(Self::process_batch( batch_executor, batch_data.l2_blocks, - updates_manager, output_handler, )); task_handles.push((next_batch, handle)); diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index d08ef2830f3f..baee426007c5 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -9,13 +9,13 @@ use anyhow::Context as _; use async_trait::async_trait; use tokio::sync::{watch, RwLock}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; -use zksync_vm_utils::storage::L1BatchParamsProvider; +use zksync_vm_executor::storage::L1BatchParamsProvider; +use zksync_vm_interface::{L1BatchEnv, SystemEnv}; use crate::{metrics::METRICS, VmRunnerIo}; diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 525a306eabf5..530016408140 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -4,13 +4,11 @@ use async_trait::async_trait; use rand::{prelude::SliceRandom, Rng}; use tokio::sync::RwLock; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; -use zksync_multivm::interface::TransactionExecutionMetrics; use zksync_node_genesis::GenesisParams; use zksync_node_test_utils::{ create_l1_batch_metadata, create_l2_block, execute_l2_transaction, l1_batch_metadata_to_commitment_artifacts, }; -use zksync_state_keeper::{StateKeeperOutputHandler, UpdatesManager}; use zksync_test_account::Account; use zksync_types::{ block::{L1BatchHeader, L2BlockHasher}, @@ -22,8 +20,9 @@ use zksync_types::{ StorageLog, StorageLogKind, StorageValue, H160, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TransactionExecutionMetrics}; -use super::{OutputHandlerFactory, VmRunnerIo}; +use super::*; mod output_handler; mod playground; @@ -155,25 +154,27 @@ struct TestOutputFactory { impl OutputHandlerFactory for TestOutputFactory { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { #[derive(Debug)] struct TestOutputHandler { delay: Option, } #[async_trait] - impl StateKeeperOutputHandler for TestOutputHandler { + impl OutputHandler for TestOutputHandler { async fn handle_l2_block( &mut self, - _updates_manager: &UpdatesManager, + _env: L2BlockEnv, + _output: &L2BlockOutput, ) -> anyhow::Result<()> { Ok(()) } async fn handle_l1_batch( - &mut self, - _updates_manager: Arc, + self: Box, + _output: Arc, ) -> anyhow::Result<()> { if let Some(delay) = self.delay { tokio::time::sleep(delay).await @@ -182,7 +183,7 @@ impl OutputHandlerFactory for TestOutputFactory { } } - let delay = self.delays.get(&l1_batch_number).copied(); + let delay = self.delays.get(&l1_batch_env.number).copied(); Ok(Box::new(TestOutputHandler { delay })) } } diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 453507328c4f..1bf30effdbe5 
100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -6,13 +6,13 @@ use tokio::{ }; use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, Core}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; -use zksync_state_keeper::UpdatesManager; +use zksync_state::interface::StorageViewCache; use zksync_types::L1BatchNumber; +use zksync_vm_interface::{FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use crate::{ tests::{wait, IoMock, TestOutputFactory}, - ConcurrentOutputHandlerFactory, OutputHandlerFactory, + ConcurrentOutputHandlerFactory, L1BatchOutput, L2BlockOutput, OutputHandlerFactory, }; struct OutputHandlerTester { @@ -40,47 +40,53 @@ impl OutputHandlerTester { } async fn spawn_test_task(&mut self, l1_batch_number: L1BatchNumber) -> anyhow::Result<()> { - let mut output_handler = self.output_factory.create_handler(l1_batch_number).await?; - let join_handle = tokio::task::spawn(async move { - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: Default::default(), + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: l1_batch_number, + timestamp: 0, + fee_input: Default::default(), + fee_account: Default::default(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 0, timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: Default::default(), - max_virtual_blocks_to_create: 0, + prev_block_hash: Default::default(), + max_virtual_blocks_to_create: 0, + }, + }; + let system_env = SystemEnv { + zk_porter_available: false, + version: Default::default(), + base_system_smart_contracts: BaseSystemContracts { + bootloader: SystemContractCode { + code: vec![], + hash: Default::default(), }, - }; - let system_env = SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![], - hash: Default::default(), - }, - default_aa: SystemContractCode { - code: vec![], - hash: Default::default(), - }, + default_aa: SystemContractCode { + code: vec![], + hash: Default::default(), }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }; - let updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + }, + bootloader_gas_limit: 0, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: 0, + chain_id: Default::default(), + }; + + let mut output_handler = self + .output_factory + .create_handler(system_env, l1_batch_env.clone()) + .await?; + let join_handle = tokio::task::spawn(async move { output_handler - .handle_l2_block(&updates_manager) + .handle_l2_block(l1_batch_env.first_l2_block, &L2BlockOutput::default()) .await .unwrap(); output_handler - .handle_l1_batch(Arc::new(updates_manager)) + .handle_l1_batch(Arc::new(L1BatchOutput { + batch: FinishedL1Batch::mock(), + storage_view_cache: StorageViewCache::default(), + })) .await .unwrap(); }); diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs index 2ac976021e0b..fec3fd2ba60a 100644 --- a/core/node/vm_runner/src/tests/process.rs +++ b/core/node/vm_runner/src/tests/process.rs @@ -5,9 +5,9 @@ use 
test_casing::test_casing; use tokio::sync::{watch, RwLock}; use zksync_dal::{ConnectionPool, Core}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_state_keeper::MainBatchExecutor; use zksync_test_account::Account; use zksync_types::{L1BatchNumber, L2ChainId}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::*; use crate::{ConcurrentOutputHandlerFactory, VmRunner, VmRunnerStorage}; @@ -54,7 +54,7 @@ async fn process_batches((batch_count, window): (u32, u32)) -> anyhow::Result<() tokio::task::spawn(async move { task.run(output_stop_receiver).await.unwrap() }); let storage = Arc::new(storage); - let batch_executor = MainBatchExecutor::new(false, false); + let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( connection_pool, Box::new(io.clone()), diff --git a/core/node/vm_runner/src/tests/storage_writer.rs b/core/node/vm_runner/src/tests/storage_writer.rs index 6cad2da6974a..76d0867125a8 100644 --- a/core/node/vm_runner/src/tests/storage_writer.rs +++ b/core/node/vm_runner/src/tests/storage_writer.rs @@ -3,17 +3,18 @@ use test_casing::test_casing; use tokio::sync::watch; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_state::OwnedStorage; -use zksync_state_keeper::MainBatchExecutor; -use zksync_types::L2ChainId; +use zksync_types::{L2ChainId, StorageLogWithPreviousValue}; +use zksync_vm_executor::batch::MainBatchExecutorFactory; use super::*; use crate::{ storage::{PostgresLoader, StorageLoader}, - ConcurrentOutputHandlerFactory, VmRunner, + ConcurrentOutputHandlerFactory, L1BatchOutput, L2BlockOutput, OutputHandler, VmRunner, }; #[derive(Debug, Clone)] struct StorageWriterIo { + last_processed_block: L2BlockNumber, last_processed_batch: Arc>, pool: ConnectionPool, insert_protective_reads: bool, @@ -72,43 +73,43 @@ impl VmRunnerIo for StorageWriterIo { impl StorageWriterIo { async fn write_storage_logs( conn: &mut Connection<'_, Core>, - updates_manager: &UpdatesManager, + block_number: L2BlockNumber, + storage_logs: impl Iterator, ) -> anyhow::Result<()> { - let storage_logs = updates_manager - .l2_block - .storage_logs - .iter() - .filter_map(|log| log.log.is_write().then_some(log.log)); + let storage_logs = storage_logs.filter_map(|log| log.log.is_write().then_some(log.log)); let storage_logs: Vec<_> = storage_logs.collect(); conn.storage_logs_dal() - .append_storage_logs(updates_manager.l2_block.number, &storage_logs) + .append_storage_logs(block_number, &storage_logs) .await?; Ok(()) } } #[async_trait] -impl StateKeeperOutputHandler for StorageWriterIo { - async fn handle_l2_block(&mut self, updates_manager: &UpdatesManager) -> anyhow::Result<()> { +impl OutputHandler for StorageWriterIo { + async fn handle_l2_block( + &mut self, + env: L2BlockEnv, + output: &L2BlockOutput, + ) -> anyhow::Result<()> { let mut conn = self.pool.connection().await?; - Self::write_storage_logs(&mut conn, updates_manager).await?; + let storage_logs = output + .transactions + .iter() + .flat_map(|(_, exec_result)| &exec_result.tx_result.logs.storage_logs); + let block_number = L2BlockNumber(env.number); + Self::write_storage_logs(&mut conn, block_number, storage_logs).await?; + self.last_processed_block = block_number; Ok(()) } - async fn handle_l1_batch( - &mut self, - updates_manager: Arc, - ) -> anyhow::Result<()> { + async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { let mut conn = self.pool.connection().await?; // Storage logs are added to the fictive block *after* 
`handle_l2_block()` is called for it, so we need to call it again here. - Self::write_storage_logs(&mut conn, &updates_manager).await?; - - let finished_batch = updates_manager - .l1_batch - .finished - .as_ref() - .expect("L1 batch is not finished"); - let state_diffs = finished_batch.state_diffs.as_ref().expect("no state diffs"); + let storage_logs = &output.batch.block_tip_execution_result.logs.storage_logs; + Self::write_storage_logs(&mut conn, self.last_processed_block, storage_logs.iter()).await?; + + let state_diffs = output.batch.state_diffs.as_ref().expect("no state diffs"); let initial_writes: Vec<_> = state_diffs .iter() .filter(|diff| diff.is_write_initial()) @@ -119,12 +120,14 @@ impl StateKeeperOutputHandler for StorageWriterIo { )) }) .collect(); + let l1_batch_number = *self.last_processed_batch.borrow() + 1; conn.storage_logs_dedup_dal() - .insert_initial_writes(updates_manager.l1_batch.number, &initial_writes) + .insert_initial_writes(l1_batch_number, &initial_writes) .await?; if self.insert_protective_reads { - let protective_reads: Vec<_> = finished_batch + let protective_reads: Vec<_> = output + .batch .final_execution_state .deduplicated_storage_logs .iter() @@ -132,12 +135,11 @@ impl StateKeeperOutputHandler for StorageWriterIo { .copied() .collect(); conn.storage_logs_dedup_dal() - .insert_protective_reads(updates_manager.l1_batch.number, &protective_reads) + .insert_protective_reads(l1_batch_number, &protective_reads) .await?; } - self.last_processed_batch - .send_replace(updates_manager.l1_batch.number); + self.last_processed_batch.send_replace(l1_batch_number); Ok(()) } } @@ -146,9 +148,10 @@ impl StateKeeperOutputHandler for StorageWriterIo { impl OutputHandlerFactory for StorageWriterIo { async fn create_handler( &mut self, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - assert_eq!(l1_batch_number, self.batch() + 1); + _system_env: SystemEnv, + l1_batch_env: L1BatchEnv, + ) -> anyhow::Result> { + assert_eq!(l1_batch_env.number, self.batch() + 1); Ok(Box::new(self.clone())) } } @@ -166,6 +169,7 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protec drop(conn); let io = Box::new(StorageWriterIo { last_processed_batch: Arc::new(watch::channel(L1BatchNumber(0)).0), + last_processed_block: L2BlockNumber(0), pool: pool.clone(), insert_protective_reads, }); @@ -175,8 +179,8 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protec .await .unwrap(); let loader = Arc::new(loader); - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); - let vm_runner = VmRunner::new(pool, io.clone(), loader, io, batch_executor); + let batch_executor = MainBatchExecutorFactory::new(false, false); + let vm_runner = VmRunner::new(pool, io.clone(), loader, io, Box::new(batch_executor)); let (stop_sender, stop_receiver) = watch::channel(false); let vm_runner_handle = tokio::spawn(async move { vm_runner.run(&stop_receiver).await }); @@ -233,13 +237,13 @@ async fn storage_writer_works(insert_protective_reads: bool) { let (output_factory, output_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), TestOutputFactory::default()); let output_factory_handle = tokio::spawn(output_factory_task.run(stop_receiver.clone())); - let batch_executor = Box::new(MainBatchExecutor::new(false, false)); + let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, Box::new(io.clone()), loader, Box::new(output_factory), - batch_executor, + Box::new(batch_executor), ); let 
vm_runner_handle = tokio::spawn(async move { vm_runner.run(&stop_receiver).await }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 8fe3b6f36f67..09b13a80e397 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8356,6 +8356,8 @@ dependencies = [ name = "zksync_vm_interface" version = "0.1.0" dependencies = [ + "anyhow", + "async-trait", "hex", "serde", "thiserror", From 19ca51208db5c739d3f3e66f47d68f451997fa8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Tue, 3 Sep 2024 12:41:48 -0300 Subject: [PATCH 011/116] feat(zk_toolbox): Add zks contracts (#2781) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add zks contracts. Builds contracts --- zk_toolbox/README.md | 8 ++ .../zk_supervisor/src/commands/contracts.rs | 135 ++++++++++++++++++ .../crates/zk_supervisor/src/commands/mod.rs | 1 + zk_toolbox/crates/zk_supervisor/src/main.rs | 12 +- .../crates/zk_supervisor/src/messages.rs | 13 ++ 5 files changed, 165 insertions(+), 4 deletions(-) create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index b35d4c8d56f1..469e36a65f64 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -320,6 +320,14 @@ Create a snapshot of the current chain: zks snapshot create ``` +### Contracts + +Build contracts: + +```bash +zks contracts +``` + ### Format Format code: diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs new file mode 100644 index 000000000000..0c635b2b0d34 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs @@ -0,0 +1,135 @@ +use std::path::PathBuf; + +use clap::{Parser, ValueEnum}; +use common::{cmd::Cmd, logger, spinner::Spinner}; +use config::EcosystemConfig; +use strum::EnumIter; +use xshell::{cmd, Shell}; + +use crate::messages::{ + MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, + MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, + MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, MSG_BUILD_SYSTEM_CONTRACTS_HELP, + MSG_CONTRACTS_DEPS_SPINNER, MSG_NOTHING_TO_BUILD_MSG, +}; + +#[derive(Debug, Parser)] +pub struct ContractsArgs { + #[clap(long, alias = "l1", help = MSG_BUILD_L1_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub l1_contracts: Option, + #[clap(long, alias = "l2", help = MSG_BUILD_L2_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub l2_contracts: Option, + #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub system_contracts: Option, +} + +impl ContractsArgs { + fn contracts(&self) -> Vec { + if self.l1_contracts.is_none() + && self.l2_contracts.is_none() + && self.system_contracts.is_none() + { + return vec![ + ContractType::L1, + ContractType::L2, + ContractType::SystemContracts, + ]; + } + + let mut contracts = vec![]; + + if self.l1_contracts.unwrap_or(false) { + contracts.push(ContractType::L1); + } + if self.l2_contracts.unwrap_or(false) { + contracts.push(ContractType::L2); + } + if self.system_contracts.unwrap_or(false) { + contracts.push(ContractType::SystemContracts); + } + + contracts + } +} + +#[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)] +#[strum(serialize_all = "lowercase")] +pub enum ContractType { + L1, + L2, + SystemContracts, +} + 
+#[derive(Debug)] +struct ContractBuilder { + dir: PathBuf, + cmd: String, + msg: String, +} + +impl ContractBuilder { + fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self { + match contract_type { + ContractType::L1 => Self { + dir: ecosystem.path_to_foundry(), + cmd: "forge build".to_string(), + msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(), + }, + ContractType::L2 => Self { + dir: ecosystem.link_to_code.clone(), + cmd: "yarn l2-contracts build".to_string(), + msg: MSG_BUILDING_L2_CONTRACTS_SPINNER.to_string(), + }, + ContractType::SystemContracts => Self { + dir: ecosystem.link_to_code.join("contracts"), + cmd: "yarn sc build".to_string(), + msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), + }, + } + } + + fn build(&self, shell: &Shell) -> anyhow::Result<()> { + let spinner = Spinner::new(&self.msg); + let _dir_guard = shell.push_dir(&self.dir); + + let mut args = self.cmd.split_whitespace().collect::>(); + let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty + let mut cmd = cmd!(shell, "{command}"); + + for arg in args { + cmd = cmd.arg(arg); + } + + Cmd::new(cmd).run()?; + + spinner.finish(); + Ok(()) + } +} + +pub fn run(shell: &Shell, args: ContractsArgs) -> anyhow::Result<()> { + let contracts = args.contracts(); + if contracts.is_empty() { + logger::outro(MSG_NOTHING_TO_BUILD_MSG); + return Ok(()); + } + + logger::info(MSG_BUILDING_CONTRACTS); + + let ecosystem = EcosystemConfig::from_file(shell)?; + let link_to_code = ecosystem.link_to_code.clone(); + + let spinner = Spinner::new(MSG_CONTRACTS_DEPS_SPINNER); + let _dir_guard = shell.push_dir(&link_to_code); + Cmd::new(cmd!(shell, "yarn install")).run()?; + spinner.finish(); + + contracts + .iter() + .map(|contract| ContractBuilder::new(&ecosystem, *contract)) + .try_for_each(|builder| builder.build(shell))?; + + logger::outro(MSG_BUILDING_CONTRACTS_SUCCESS); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 181ce50c2134..e45512d50d89 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -1,4 +1,5 @@ pub mod clean; +pub mod contracts; pub mod database; pub mod fmt; pub mod lint; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 9a1c1ad74bcd..6b5bfa46943e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,6 +1,7 @@ use clap::{Parser, Subcommand}; use commands::{ - database::DatabaseCommands, lint::LintArgs, snapshot::SnapshotCommands, test::TestCommands, + contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, + snapshot::SnapshotCommands, test::TestCommands, }; use common::{ check_general_prerequisites, @@ -10,9 +11,9 @@ use common::{ }; use config::EcosystemConfig; use messages::{ - msg_global_chain_does_not_exist, MSG_PROVER_VERSION_ABOUT, MSG_SUBCOMMAND_CLEAN, - MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, - MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, + msg_global_chain_does_not_exist, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, + MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, + MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; @@ -49,6 +50,8 @@ enum SupervisorSubcommands { Markdown, 
#[command(about = MSG_PROVER_VERSION_ABOUT)] ProverVersion, + #[command(about = MSG_CONTRACTS_ABOUT)] + Contracts(ContractsArgs), } #[derive(Parser, Debug)] @@ -106,6 +109,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, SupervisorSubcommands::ProverVersion => commands::prover_version::run(shell).await?, + SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 2374cd69f0e6..17f01e664678 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -13,6 +13,7 @@ pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related command pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts"; pub(super) const MSG_SUBCOMMAND_LINT_ABOUT: &str = "Lint code"; +pub(super) const MSG_CONTRACTS_ABOUT: &str = "Build contracts"; pub(super) const MSG_SUBCOMMAND_FMT_ABOUT: &str = "Format code"; @@ -104,6 +105,18 @@ pub(super) const MSG_PROVER_TEST_SUCCESS: &str = "Prover tests ran successfully" pub(super) const MSG_POSTGRES_CONFIG_NOT_FOUND_ERR: &str = "Postgres config not found"; pub(super) const MSG_RESETTING_TEST_DATABASES: &str = "Resetting test databases"; +// Contract building related messages +pub(super) const MSG_NOTHING_TO_BUILD_MSG: &str = "Nothing to build!"; +pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; +pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; +pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; +pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; +pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; +pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; +pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; +pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; +pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; + // Integration tests related messages pub(super) fn msg_integration_tests_run(external_node: bool) -> String { let base = "Running integration tests"; From b2dd9a5c08fecf0a878632b33a32a78aac11c065 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 3 Sep 2024 18:47:48 +0300 Subject: [PATCH 012/116] feat(genesis): Validate genesis config against L1 (#2786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Validate protocol version and vk hash from genesis config against L1 ## Why ❔ Right now nothing prevents initializing contracts and the node with different protocol versions or vk hashes, and it already happened 🥲 ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`.
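For reference, the whole check reduces to three read-only L1 calls. Below is a condensed sketch of the flow, taken from `validate_genesis_params()` in the diff that follows (error contexts and intermediate bindings are elided; see the diff for the real implementation):

```rust
// Read the packed protocol version from the diamond proxy and compare it to the genesis config.
let packed_version: U256 = CallFunctionArgs::new("getProtocolVersion", ())
    .for_contract(diamond_proxy_address, &hyperchain_contract())
    .call(query_client)
    .await?;
let l1_version = ProtocolSemanticVersion::try_from_packed(packed_version)
    .map_err(|err| anyhow::format_err!("Failed to unpack semver: {err}"))?;
anyhow::ensure!(
    l1_version == genesis_params.protocol_version(),
    "Protocol version mismatch between L1 and genesis config"
);

// The VK hash lives on the verifier contract, whose address is itself read from the diamond proxy.
let verifier_address: Address = CallFunctionArgs::new("getVerifier", ())
    .for_contract(diamond_proxy_address, &hyperchain_contract())
    .call(query_client)
    .await?;
let vk_hash: H256 = CallFunctionArgs::new("verificationKeyHash", ())
    .for_contract(verifier_address, &verifier_contract())
    .call(query_client)
    .await?;
anyhow::ensure!(
    vk_hash == genesis_params.config().recursion_scheduler_level_vk_hash,
    "Verification key hash mismatch between L1 and genesis config"
);
```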
--- core/node/genesis/src/lib.rs | 61 ++++++++++++++++--- .../src/main_node/genesis.rs | 6 ++ 2 files changed, 58 insertions(+), 9 deletions(-) diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index bbad6b9a2223..6713e5a4bcc2 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -6,9 +6,12 @@ use std::fmt::Formatter; use anyhow::Context as _; use zksync_config::GenesisConfig; -use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes, SET_CHAIN_ID_EVENT}; +use zksync_contracts::{ + hyperchain_contract, verifier_contract, BaseSystemContracts, BaseSystemContractsHashes, + SET_CHAIN_ID_EVENT, +}; use zksync_dal::{Connection, Core, CoreDal, DalError}; -use zksync_eth_client::EthInterface; +use zksync_eth_client::{CallFunctionArgs, EthInterface}; use zksync_merkle_tree::{domain::ZkSyncTree, TreeInstruction}; use zksync_multivm::utils::get_max_gas_per_pubdata_byte; use zksync_system_constants::PRIORITY_EXPIRATION; @@ -21,7 +24,7 @@ use zksync_types::{ system_contracts::get_system_smart_contracts, web3::{BlockNumber, FilterBuilder}, AccountTreeId, Address, Bloom, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, - ProtocolVersion, ProtocolVersionId, StorageKey, H256, + ProtocolVersion, ProtocolVersionId, StorageKey, H256, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -110,12 +113,9 @@ impl GenesisParams { }, ))); } - // Try to convert value from config to the real protocol version and return error - // if the version doesn't exist - let _: ProtocolVersionId = config - .protocol_version - .map(|p| p.minor) - .ok_or(GenesisError::MalformedConfig("protocol_version"))?; + if config.protocol_version.is_none() { + return Err(GenesisError::MalformedConfig("protocol_version")); + } Ok(GenesisParams { base_system_contracts, system_contracts, @@ -264,6 +264,49 @@ pub async fn is_genesis_needed(storage: &mut Connection<'_, Core>) -> Result anyhow::Result<()> { + let hyperchain_abi = hyperchain_contract(); + let verifier_abi = verifier_contract(); + + let packed_protocol_version: U256 = CallFunctionArgs::new("getProtocolVersion", ()) + .for_contract(diamond_proxy_address, &hyperchain_abi) + .call(query_client) + .await?; + + let protocol_version = ProtocolSemanticVersion::try_from_packed(packed_protocol_version) + .map_err(|err| anyhow::format_err!("Failed to unpack semver: {err}"))?; + + if protocol_version != genesis_params.protocol_version() { + return Err(anyhow::anyhow!( + "Protocol version mismatch: {protocol_version} on contract, {} in config", + genesis_params.protocol_version() + )); + } + + let verifier_address: Address = CallFunctionArgs::new("getVerifier", ()) + .for_contract(diamond_proxy_address, &hyperchain_abi) + .call(query_client) + .await?; + + let verification_key_hash: H256 = CallFunctionArgs::new("verificationKeyHash", ()) + .for_contract(verifier_address, &verifier_abi) + .call(query_client) + .await?; + + if verification_key_hash != genesis_params.config().recursion_scheduler_level_vk_hash { + return Err(anyhow::anyhow!( + "Verification key hash mismatch: {verification_key_hash:?} on contract, {:?} in config", + genesis_params.config().recursion_scheduler_level_vk_hash + )); + } + + Ok(()) +} + pub async fn ensure_genesis_state( storage: &mut Connection<'_, Core>, genesis_params: &GenesisParams, diff --git a/core/node/node_storage_init/src/main_node/genesis.rs b/core/node/node_storage_init/src/main_node/genesis.rs index db2eef51912e..e98473840370 100644 --- 
a/core/node/node_storage_init/src/main_node/genesis.rs +++ b/core/node/node_storage_init/src/main_node/genesis.rs @@ -30,6 +30,12 @@ impl InitializeStorage for MainNodeGenesis { } let params = GenesisParams::load_genesis_params(self.genesis.clone())?; + zksync_node_genesis::validate_genesis_params( + ¶ms, + &self.l1_client, + self.contracts.diamond_proxy_addr, + ) + .await?; zksync_node_genesis::ensure_genesis_state(&mut storage, ¶ms).await?; if let Some(ecosystem_contracts) = &self.contracts.ecosystem_contracts { From 87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488 Mon Sep 17 00:00:00 2001 From: Fedor Sakharov Date: Wed, 4 Sep 2024 09:36:02 +0300 Subject: [PATCH 013/116] feat: Integrate tracers and implement circuits tracer in vm2 (#2653) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Integrates tracers support into the codebase and implements the circuits tracer. ## Why ❔ Tracers are required for some VM applications, e.g. to determine batch seal criteria and for tracing calls. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: Joonatan Saarhelo --- Cargo.lock | 11 +- Cargo.toml | 2 +- core/lib/multivm/src/versions/shadow.rs | 5 + .../src/versions/vm_fast/circuits_tracer.rs | 157 ++++++++++++++++++ core/lib/multivm/src/versions/vm_fast/mod.rs | 1 + .../src/versions/vm_fast/tests/code_oracle.rs | 10 +- .../tests/tester/transaction_test_info.rs | 10 +- .../src/versions/vm_fast/tests/utils.rs | 5 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 56 +++---- .../lib/vm_interface/src/storage/in_memory.rs | 2 +- .../state_keeper/src/executor/tests/mod.rs | 22 +++ .../state_keeper/src/executor/tests/tester.rs | 26 ++- core/tests/test_account/src/lib.rs | 2 +- .../contracts/failed-call/failed_call.sol | 24 +++ infrastructure/zk/src/prover_setup.ts | 6 +- prover/Cargo.lock | 11 +- 16 files changed, 301 insertions(+), 49 deletions(-) create mode 100644 core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs create mode 100644 etc/contracts-test-data/contracts/failed-call/failed_call.sol diff --git a/Cargo.lock b/Cargo.lock index e57c437d4bf1..cfa185345280 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1981,6 +1981,14 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "eravm-stable-interface" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" +dependencies = [ + "primitive-types", +] + [[package]] name = "errno" version = "0.3.9" @@ -7307,9 +7315,10 @@ dependencies = [ [[package]] name = "vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235" +source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" dependencies = [ "enum_dispatch", + "eravm-stable-interface", "primitive-types", "zk_evm_abstractions 0.150.4", "zkevm_opcode_defs 0.150.4", diff --git a/Cargo.toml b/Cargo.toml index 334c85870f27..7d28cd7fe21b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -217,7 +217,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } zk_evm_1_5_0 = { 
package = "zk_evm", version = "=0.150.4" } # New VM; pinned to a specific commit because of instability -vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "2276b7b5af520fca0477bdafe43781b51896d235" } +vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. zksync_concurrency = "=0.1.0-rc.11" diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 7394c4617509..32a4463c425d 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++ b/core/lib/multivm/src/versions/shadow.rs @@ -165,6 +165,11 @@ impl DivergenceErrors { let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); self.check_match("logs.storage_logs", &main_logs, &shadow_logs); self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + self.check_match( + "statistics.circuit_statistic", + &main_result.statistics.circuit_statistic, + &shadow_result.statistics.circuit_statistic, + ); self.check_match( "gas_remaining", &main_result.statistics.gas_remaining, diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs new file mode 100644 index 000000000000..061d91be60b7 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -0,0 +1,157 @@ +use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; +use vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm_interface::CircuitStatistic; + +use crate::vm_latest::tracers::circuits_capacity::*; + +#[derive(Debug, Default, Clone, PartialEq)] +pub(crate) struct CircuitsTracer { + main_vm_cycles: u32, + ram_permutation_cycles: u32, + storage_application_cycles: u32, + storage_sorter_cycles: u32, + code_decommitter_cycles: u32, + code_decommitter_sorter_cycles: u32, + log_demuxer_cycles: u32, + events_sorter_cycles: u32, + keccak256_cycles: u32, + ecrecover_cycles: u32, + sha256_cycles: u32, + secp256k1_verify_cycles: u32, + transient_storage_checker_cycles: u32, +} + +impl Tracer for CircuitsTracer { + fn after_instruction(&mut self, _state: &mut S) { + self.main_vm_cycles += 1; + + match OP::VALUE { + Opcode::Nop + | Opcode::Add + | Opcode::Sub + | Opcode::Mul + | Opcode::Div + | Opcode::Jump + | Opcode::Xor + | Opcode::And + | Opcode::Or + | Opcode::ShiftLeft + | Opcode::ShiftRight + | Opcode::RotateLeft + | Opcode::RotateRight + | Opcode::PointerAdd + | Opcode::PointerSub + | Opcode::PointerPack + | Opcode::PointerShrink => { + self.ram_permutation_cycles += RICH_ADDRESSING_OPCODE_RAM_CYCLES; + } + Opcode::This + | Opcode::Caller + | Opcode::CodeAddress + | Opcode::ContextMeta + | Opcode::ErgsLeft + | Opcode::SP + | Opcode::ContextU128 + | Opcode::SetContextU128 + | Opcode::AuxMutating0 + | Opcode::IncrementTxNumber + | Opcode::Ret(_) + | Opcode::NearCall => { + self.ram_permutation_cycles += AVERAGE_OPCODE_RAM_CYCLES; + } + Opcode::StorageRead => { + self.ram_permutation_cycles += STORAGE_READ_RAM_CYCLES; + self.log_demuxer_cycles += STORAGE_READ_LOG_DEMUXER_CYCLES; + self.storage_sorter_cycles += STORAGE_READ_STORAGE_SORTER_CYCLES; + } + Opcode::TransientStorageRead => { + self.ram_permutation_cycles += TRANSIENT_STORAGE_READ_RAM_CYCLES; + self.log_demuxer_cycles += TRANSIENT_STORAGE_READ_LOG_DEMUXER_CYCLES; + self.transient_storage_checker_cycles += + TRANSIENT_STORAGE_READ_TRANSIENT_STORAGE_CHECKER_CYCLES; + } + Opcode::StorageWrite => { + self.ram_permutation_cycles 
+= STORAGE_WRITE_RAM_CYCLES; + self.log_demuxer_cycles += STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.storage_sorter_cycles += STORAGE_WRITE_STORAGE_SORTER_CYCLES; + } + Opcode::TransientStorageWrite => { + self.ram_permutation_cycles += TRANSIENT_STORAGE_WRITE_RAM_CYCLES; + self.log_demuxer_cycles += TRANSIENT_STORAGE_WRITE_LOG_DEMUXER_CYCLES; + self.transient_storage_checker_cycles += + TRANSIENT_STORAGE_WRITE_TRANSIENT_STORAGE_CHECKER_CYCLES; + } + Opcode::L2ToL1Message | Opcode::Event => { + self.ram_permutation_cycles += EVENT_RAM_CYCLES; + self.log_demuxer_cycles += EVENT_LOG_DEMUXER_CYCLES; + self.events_sorter_cycles += EVENT_EVENTS_SORTER_CYCLES; + } + Opcode::PrecompileCall => { + self.ram_permutation_cycles += PRECOMPILE_RAM_CYCLES; + self.log_demuxer_cycles += PRECOMPILE_LOG_DEMUXER_CYCLES; + } + Opcode::Decommit => { + // Note, that for decommit the log demuxer circuit is not used. + self.ram_permutation_cycles += LOG_DECOMMIT_RAM_CYCLES; + self.code_decommitter_sorter_cycles += LOG_DECOMMIT_DECOMMITTER_SORTER_CYCLES; + } + Opcode::FarCall(_) => { + self.ram_permutation_cycles += FAR_CALL_RAM_CYCLES; + self.code_decommitter_sorter_cycles += FAR_CALL_CODE_DECOMMITTER_SORTER_CYCLES; + self.storage_sorter_cycles += FAR_CALL_STORAGE_SORTER_CYCLES; + self.log_demuxer_cycles += FAR_CALL_LOG_DEMUXER_CYCLES; + } + Opcode::AuxHeapWrite | Opcode::HeapWrite /* StaticMemoryWrite */ => { + self.ram_permutation_cycles += UMA_WRITE_RAM_CYCLES; + } + Opcode::AuxHeapRead | Opcode::HeapRead | Opcode::PointerRead /* StaticMemoryRead */ => { + self.ram_permutation_cycles += UMA_READ_RAM_CYCLES; + } + } + } + + fn on_extra_prover_cycles(&mut self, stats: CycleStats) { + match stats { + CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles, + CycleStats::Sha256(cycles) => self.sha256_cycles += cycles, + CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles, + CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles, + CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles, + CycleStats::StorageRead => self.storage_application_cycles += 1, + CycleStats::StorageWrite => self.storage_application_cycles += 2, + } + } +} + +impl CircuitsTracer { + pub(crate) fn circuit_statistic(&self) -> CircuitStatistic { + CircuitStatistic { + main_vm: self.main_vm_cycles as f32 / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32, + ram_permutation: self.ram_permutation_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ram_permutation as f32, + storage_application: self.storage_application_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_application as f32, + storage_sorter: self.storage_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_storage_sorter as f32, + code_decommitter: self.code_decommitter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_code_decommitter as f32, + code_decommitter_sorter: self.code_decommitter_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_code_decommitter_sorter as f32, + log_demuxer: self.log_demuxer_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_log_demuxer as f32, + events_sorter: self.events_sorter_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_events_or_l1_messages_sorter as f32, + keccak256: self.keccak256_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_keccak256_circuit as f32, + ecrecover: self.ecrecover_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, + sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, + secp256k1_verify: self.secp256k1_verify_cycles as f32 + / 
GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32, + transient_storage_checker: self.transient_storage_checker_cycles as f32 + / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32, + } + } +} + +const GEOMETRY_CONFIG: GeometryConfig = get_geometry_config(); diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index 4deb6b9dbf74..f0d8bafe69ec 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -2,6 +2,7 @@ pub use self::vm::Vm; mod bootloader_state; mod bytecode; +mod circuits_tracer; mod events; mod glue; mod hook; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 946ad0c38b0c..29df17d7293c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -207,11 +207,11 @@ fn refunds_in_code_oracle() { let account = &mut vm.rich_accounts[0]; if decommit { - let (_, is_fresh) = vm - .vm - .inner - .world_diff - .decommit_opcode(&mut vm.vm.world, h256_to_u256(normal_zkevm_bytecode_hash)); + let (_, is_fresh) = vm.vm.inner.world_diff.decommit_opcode( + &mut vm.vm.world, + &mut vm.vm.tracer, + h256_to_u256(normal_zkevm_bytecode_hash), + ); assert!(is_fresh); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 0d8c6b20764a..5b8f0cb0b10f 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -7,7 +7,7 @@ use crate::{ VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, VmRevertReason, }, - vm_fast::Vm, + vm_fast::{circuits_tracer::CircuitsTracer, vm::World, Vm}, }; #[derive(Debug, Clone)] @@ -185,14 +185,14 @@ impl TransactionTestInfo { // TODO this doesn't include all the state of ModifiedWorld #[derive(Debug, PartialEq)] -struct VmStateDump { - state: vm2::State, +struct VmStateDump { + state: vm2::State>, storage_writes: Vec<((H160, U256), U256)>, events: Box<[vm2::Event]>, } -impl Vm { - fn dump_state(&self) -> VmStateDump { +impl Vm { + fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.state.clone(), storage_writes: self diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index d696aa582d64..d75ae12c30c1 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -17,7 +17,10 @@ use crate::interface::storage::ReadStorage; pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); -pub(crate) fn verify_required_memory(state: &State, required_values: Vec<(U256, HeapId, u32)>) { +pub(crate) fn verify_required_memory( + state: &State, + required_values: Vec<(U256, HeapId, u32)>, +) { for (required_value, memory_page, cell) in required_values { let current_value = state.heaps[memory_page].read_u256(cell * 32); assert_eq!(current_value, required_value); diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 3a01a10d1871..d40ea075f19c 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -2,7 +2,7 @@ use 
std::{collections::HashMap, fmt}; use vm2::{ decode::decode_program, fat_pointer::FatPointer, instruction_handlers::HeapInterface, - ExecutionEnd, Program, Settings, VirtualMachine, + ExecutionEnd, Program, Settings, Tracer, VirtualMachine, }; use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; use zksync_contracts::SystemContractCode; @@ -23,6 +23,7 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, bytecode::compress_bytecodes, + circuits_tracer::CircuitsTracer, hook::Hook, initial_bootloader_memory::bootloader_initial_memory, transaction_data::TransactionData, @@ -55,14 +56,14 @@ use crate::{ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory; pub struct Vm { - pub(crate) world: World, - pub(crate) inner: VirtualMachine, - suspended_at: u16, + pub(crate) world: World, + pub(crate) inner: VirtualMachine>, gas_for_account_validation: u32, pub(crate) bootloader_state: BootloaderState, pub(crate) batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, snapshot: Option, + pub(crate) tracer: CircuitsTracer, } impl Vm { @@ -79,14 +80,8 @@ impl Vm { let mut pubdata_before = self.inner.world_diff.pubdata() as u32; let result = loop { - let hook = match self.inner.resume_from(self.suspended_at, &mut self.world) { - ExecutionEnd::SuspendedOnHook { - hook, - pc_to_resume_from, - } => { - self.suspended_at = pc_to_resume_from; - hook - } + let hook = match self.inner.run(&mut self.world, &mut self.tracer) { + ExecutionEnd::SuspendedOnHook(hook) => hook, ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, ExecutionEnd::Reverted(output) => { break match TxRevertReason::parse_error(&output) { @@ -394,7 +389,6 @@ impl Vm { let mut me = Self { world: World::new(storage, program_cache), inner, - suspended_at: 0, gas_for_account_validation: system_env.default_validation_computational_gas_limit, bootloader_state: BootloaderState::new( system_env.execution_mode, @@ -404,6 +398,7 @@ impl Vm { system_env, batch_env, snapshot: None, + tracer: CircuitsTracer::default(), }; me.write_to_bootloader_heap(bootloader_memory); @@ -470,6 +465,8 @@ impl VmInterface for Vm { track_refunds = true; } + self.tracer = Default::default(); + let start = self.inner.world_diff.snapshot(); let pubdata_before = self.inner.world_diff.pubdata(); @@ -525,6 +522,9 @@ impl VmInterface for Vm { }; let pubdata_after = self.inner.world_diff.pubdata(); + + let circuit_statistic = self.tracer.circuit_statistic(); + VmExecutionResultAndLogs { result, logs, @@ -537,7 +537,7 @@ impl VmInterface for Vm { computational_gas_used: 0, total_log_queries: 0, pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: Default::default(), + circuit_statistic, }, refunds, } @@ -599,7 +599,6 @@ impl VmInterface for Vm { struct VmSnapshot { vm_snapshot: vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, - suspended_at: u16, gas_for_account_validation: u32, } @@ -614,7 +613,6 @@ impl VmInterfaceHistoryEnabled for Vm { self.snapshot = Some(VmSnapshot { vm_snapshot: self.inner.snapshot(), bootloader_snapshot: self.bootloader_state.get_snapshot(), - suspended_at: self.suspended_at, gas_for_account_validation: self.gas_for_account_validation, }); } @@ -623,13 +621,11 @@ impl VmInterfaceHistoryEnabled for Vm { let VmSnapshot { vm_snapshot, bootloader_snapshot, - suspended_at, gas_for_account_validation, } = 
self.snapshot.take().expect("no snapshots to rollback to"); self.inner.rollback(vm_snapshot); self.bootloader_state.apply_snapshot(bootloader_snapshot); - self.suspended_at = suspended_at; self.gas_for_account_validation = gas_for_account_validation; self.delete_history_if_appropriate(); @@ -644,7 +640,6 @@ impl VmInterfaceHistoryEnabled for Vm { impl fmt::Debug for Vm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vm") - .field("suspended_at", &self.suspended_at) .field( "gas_for_account_validation", &self.gas_for_account_validation, @@ -659,16 +654,16 @@ impl fmt::Debug for Vm { } } -#[derive(Debug)] -pub(crate) struct World { +#[derive(Debug, Clone)] +pub(crate) struct World { pub(crate) storage: S, // TODO (PLA-1008): Store `Program`s in an LRU cache - program_cache: HashMap, + program_cache: HashMap>>, pub(crate) bytecode_cache: HashMap>, } -impl World { - fn new(storage: S, program_cache: HashMap) -> Self { +impl World { + fn new(storage: S, program_cache: HashMap>>) -> Self { Self { storage, program_cache, @@ -677,7 +672,7 @@ impl World { } } -impl vm2::World for World { +impl vm2::World for World { fn decommit_code(&mut self, hash: U256) -> Vec { self.decommit(hash) .code_page() @@ -691,7 +686,7 @@ impl vm2::World for World { .collect() } - fn decommit(&mut self, hash: U256) -> Program { + fn decommit(&mut self, hash: U256) -> Program> { self.program_cache .entry(hash) .or_insert_with(|| { @@ -703,7 +698,9 @@ impl vm2::World for World { }) .clone() } +} +impl vm2::StorageInterface for World { fn read_storage(&mut self, contract: H160, key: U256) -> Option { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); if self.storage.is_write_initial(key) { @@ -748,7 +745,7 @@ impl vm2::World for World { } } -fn bytecode_to_program(bytecode: &[u8]) -> Program { +fn bytecode_to_program>(bytecode: &[u8]) -> Program { Program::new( decode_program( &bytecode @@ -764,7 +761,10 @@ fn bytecode_to_program(bytecode: &[u8]) -> Program { ) } -fn convert_system_contract_code(code: &SystemContractCode, is_bootloader: bool) -> (U256, Program) { +fn convert_system_contract_code>( + code: &SystemContractCode, + is_bootloader: bool, +) -> (U256, Program) { ( h256_to_u256(code.hash), Program::new( diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index d4b5e57f1fa0..6a8b56433455 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -12,7 +12,7 @@ use super::ReadStorage; pub const IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID: u32 = 270; /// In-memory storage. 
-#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct InMemoryStorage { state: HashMap, factory_deps: HashMap>, diff --git a/core/node/state_keeper/src/executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs index 90ce236a38f8..6fa4522d43fd 100644 --- a/core/node/state_keeper/src/executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -2,6 +2,7 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; +use tester::AccountFailedCall; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt}; use zksync_test_account::Account; @@ -300,6 +301,27 @@ async fn deploy_and_call_loadtest(vm_mode: FastVmMode) { executor.finish_batch().await.unwrap(); } +#[test_casing(3, FAST_VM_MODES)] +#[tokio::test] +async fn deploy_failedcall(vm_mode: FastVmMode) { + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + + let mut tester = Tester::new(connection_pool, vm_mode); + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + + let tx = alice.deploy_failedcall_tx(); + + let execute_tx = executor.execute_tx(tx.tx).await.unwrap(); + assert_executed(&execute_tx); + + executor.finish_batch().await.unwrap(); +} + /// Checks that a tx that is reverted by the VM still can be included into a batch. #[test_casing(3, FAST_VM_MODES)] #[tokio::test] diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index a00d9ca5ec15..8256435f2f5b 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -6,7 +6,10 @@ use std::{collections::HashMap, fmt::Debug, sync::Arc}; use tempfile::TempDir; use tokio::{sync::watch, task::JoinHandle}; use zksync_config::configs::chain::StateKeeperConfig; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; +use zksync_contracts::{ + get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, TestContract, +}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_multivm::{ interface::{ @@ -262,9 +265,8 @@ impl Tester { /// Adds funds for specified account list. /// Expects genesis to be performed (i.e. `setup_storage` called beforehand). 
pub(super) async fn fund(&self, addresses: &[Address]) { - let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); - let eth_amount = U256::from(10u32).pow(U256::from(32)); //10^32 wei + let mut storage = self.pool.connection_tagged("state_keeper").await.unwrap(); for address in addresses { let key = storage_key_for_standard_token_balance( @@ -336,6 +338,24 @@ pub trait AccountLoadNextExecutable { ) -> Transaction; } +pub trait AccountFailedCall { + fn deploy_failedcall_tx(&mut self) -> DeployContractsTx; +} + +impl AccountFailedCall for Account { + fn deploy_failedcall_tx(&mut self) -> DeployContractsTx { + let bytecode = read_bytecode( + "etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"); + let failedcall_contract = TestContract { + bytecode, + contract: load_contract("etc/contracts-test-data/artifacts-zk/contracts/failed-call/failed_call.sol/FailedCall.json"), + factory_deps: vec![], + }; + + self.get_deploy_tx(&failedcall_contract.bytecode, None, TxType::L2) + } +} + impl AccountLoadNextExecutable for Account { fn deploy_loadnext_tx(&mut self) -> DeployContractsTx { let loadnext_contract = get_loadnext_contract(); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index e259ce209c63..28e3d609e63d 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -89,7 +89,7 @@ impl Account { pub fn default_fee() -> Fee { Fee { - gas_limit: U256::from(2000000000u32), + gas_limit: U256::from(2_000_000_000u32), max_fee_per_gas: U256::from(BASE_FEE), max_priority_fee_per_gas: U256::from(100), gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE), diff --git a/etc/contracts-test-data/contracts/failed-call/failed_call.sol b/etc/contracts-test-data/contracts/failed-call/failed_call.sol new file mode 100644 index 000000000000..7a8f43fbd895 --- /dev/null +++ b/etc/contracts-test-data/contracts/failed-call/failed_call.sol @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +contract FailedCall { + bool public success; + bytes1 public data_first_byte; + + constructor() { + address MSG_VALUE_SIMULATOR = 0x0000000000000000000000000000000000008009; + + while (gasleft() > 20000) { + // Burn gas so that there's about 20k left before the external call. + } + + // This call fails because MSG_VALUE_SIMULATOR forcibly takes 27k gas + (bool s, bytes memory data) = MSG_VALUE_SIMULATOR.call( + abi.encodeWithSignature("deadBeef()") + ); + + success = s; + data_first_byte = data[0]; + } +} diff --git a/infrastructure/zk/src/prover_setup.ts b/infrastructure/zk/src/prover_setup.ts index b5bd4c828aec..0ef3515cc750 100644 --- a/infrastructure/zk/src/prover_setup.ts +++ b/infrastructure/zk/src/prover_setup.ts @@ -30,7 +30,8 @@ export async function setupProver(proverType: ProverType) { } else { env.modify( 'FRI_PROVER_SETUP_DATA_PATH', - `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${proverType === ProverType.GPU ? 'gpu' : 'cpu' + `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${ + proverType === ProverType.GPU ? 'gpu' : 'cpu' }/`, process.env.ENV_FILE! ); @@ -97,7 +98,8 @@ async function setupProverKeys(proverType: ProverType) { env.modify( 'FRI_PROVER_SETUP_DATA_PATH', - `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${proverType === ProverType.GPU ? 
'gpu' : 'cpu'
+        `${process.env.ZKSYNC_HOME}/etc/hyperchains/prover-keys/${process.env.ZKSYNC_ENV}/${
+            proverType === ProverType.GPU ? 'gpu' : 'cpu'
         }/`,
         process.env.ENV_FILE!
     );

diff --git a/prover/Cargo.lock b/prover/Cargo.lock
index 09b13a80e397..fe6f04d74c82 100644
--- a/prover/Cargo.lock
+++ b/prover/Cargo.lock
@@ -1880,6 +1880,14 @@ dependencies = [
  "serde_json",
 ]
 
+[[package]]
+name = "eravm-stable-interface"
+version = "0.1.0"
+source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90"
+dependencies = [
+ "primitive-types",
+]
+
 [[package]]
 name = "errno"
 version = "0.3.9"
@@ -6816,9 +6824,10 @@ dependencies = [
 
 [[package]]
 name = "vm2"
 version = "0.1.0"
-source = "git+https://github.com/matter-labs/vm2.git?rev=2276b7b5af520fca0477bdafe43781b51896d235#2276b7b5af520fca0477bdafe43781b51896d235"
+source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90"
 dependencies = [
  "enum_dispatch",
+ "eravm-stable-interface",
  "primitive-types",
  "zk_evm_abstractions 0.150.4",
  "zkevm_opcode_defs 0.150.4",

From e239260d77b55fcce0b1f485029762a605cdb6d0 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Wed, 4 Sep 2024 12:13:25 +0400
Subject: [PATCH 014/116] feat(prover): Extract keystore into a separate crate (#2797)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Extract `keystore` and some adjacent code from `vk_setup_data_generator_server_fri` into a separate library crate.
- ⚠️ this PR just moves code around. I didn't introduce any changes, so as not to make the review harder than it should be. Changes will come as follow-ups.

## Why ❔

- `vk_setup_data_generator_server_fri` served as both a binary and a library, which was super confusing.
- Better logical separation of code.
- Prerequisite for further refactoring.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
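To make the new layering concrete, here is a minimal sketch of a downstream consumer after the move. It is illustrative only: the dependency line, the import path, and `load_commitments` come from this diff, while the helper function itself and how a `Keystore` is constructed are assumptions.

```rust
// Illustrative sketch, not part of this PR. A consumer now depends on the
// plain library crate instead of the old binary-plus-library crate:
//
//     [dependencies]
//     zksync_prover_keystore = { path = "crates/lib/keystore" }
//
use zksync_prover_keystore::keystore::Keystore;

// Hypothetical helper: loading VK commitments through the moved API.
// Constructing the `Keystore` instance is out of scope for this sketch.
fn check_commitments(keystore: &Keystore) -> anyhow::Result<()> {
    let _commitments = keystore.load_commitments()?;
    Ok(())
}
```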
--- prover/Cargo.lock | 45 ++++++++++++------- prover/Cargo.toml | 1 + .../bin/proof_fri_compressor/Cargo.toml | 2 +- .../proof_fri_compressor/src/compressor.rs | 2 +- prover/crates/bin/prover_fri/Cargo.toml | 4 +- .../src/gpu_prover_job_processor.rs | 2 +- .../prover_fri/src/prover_job_processor.rs | 2 +- .../crates/bin/prover_fri/tests/basic_test.rs | 4 +- .../Cargo.toml | 20 +-------- .../src/commitment_generator.rs | 8 ++-- .../src/main.rs | 3 +- .../src/tests.rs | 2 +- .../src/vk_commitment_helper.rs | 3 +- .../crates/bin/witness_generator/Cargo.toml | 2 +- .../witness_generator/src/leaf_aggregation.rs | 2 +- .../crates/bin/witness_generator/src/main.rs | 5 ++- .../witness_generator/src/node_aggregation.rs | 2 +- .../witness_generator/src/recursion_tip.rs | 2 +- .../bin/witness_generator/src/scheduler.rs | 2 +- .../bin/witness_vector_generator/Cargo.toml | 2 +- .../witness_vector_generator/src/generator.rs | 2 +- .../tests/basic_test.rs | 2 +- prover/crates/lib/keystore/Cargo.toml | 35 +++++++++++++++ .../keystore}/src/commitment_utils.rs | 0 .../keystore}/src/keystore.rs | 1 + .../keystore}/src/lib.rs | 1 - .../keystore}/src/setup_data_generator.rs | 0 .../keystore}/src/utils.rs | 0 28 files changed, 95 insertions(+), 61 deletions(-) create mode 100644 prover/crates/lib/keystore/Cargo.toml rename prover/crates/{bin/vk_setup_data_generator_server_fri => lib/keystore}/src/commitment_utils.rs (100%) rename prover/crates/{bin/vk_setup_data_generator_server_fri => lib/keystore}/src/keystore.rs (99%) rename prover/crates/{bin/vk_setup_data_generator_server_fri => lib/keystore}/src/lib.rs (99%) rename prover/crates/{bin/vk_setup_data_generator_server_fri => lib/keystore}/src/setup_data_generator.rs (100%) rename prover/crates/{bin/vk_setup_data_generator_server_fri => lib/keystore}/src/utils.rs (100%) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index fe6f04d74c82..3ac54b477380 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8023,10 +8023,10 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_interface", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", - "zksync_vk_setup_data_generator_server_fri", "zksync_vlog", ] @@ -8123,10 +8123,10 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", - "zksync_vk_setup_data_generator_server_fri", "zksync_vlog", ] @@ -8217,6 +8217,29 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "zksync_prover_keystore" +version = "0.1.0" +dependencies = [ + "anyhow", + "bincode", + "circuit_definitions", + "hex", + "md5", + "once_cell", + "serde", + "serde_json", + "sha3 0.10.8", + "shivini", + "tracing", + "zkevm_test_harness", + "zksync_config", + "zksync_env_config", + "zksync_prover_fri_types", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_queued_job_processor" version = "0.1.0" @@ -8310,29 +8333,17 @@ name = "zksync_vk_setup_data_generator_server_fri" version = "0.1.0" dependencies = [ "anyhow", - "bincode", "circuit_definitions", "clap 4.5.4", - "hex", "indicatif", - "itertools 0.10.5", - "md5", - "once_cell", "proptest", - "serde", - "serde_derive", - "serde_json", - "sha3 0.10.8", - "shivini", "toml_edit 0.14.4", "tracing", "tracing-subscriber", "zkevm_test_harness", - "zksync_config", - "zksync_env_config", "zksync_prover_fri_types", + "zksync_prover_keystore", "zksync_types", - "zksync_utils", "zksync_vlog", 
] @@ -8427,11 +8438,11 @@ dependencies = [ "zksync_prover_fri_types", "zksync_prover_fri_utils", "zksync_prover_interface", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_system_constants", "zksync_types", "zksync_utils", - "zksync_vk_setup_data_generator_server_fri", "zksync_vlog", ] @@ -8454,10 +8465,10 @@ dependencies = [ "zksync_prover_dal", "zksync_prover_fri_types", "zksync_prover_fri_utils", + "zksync_prover_keystore", "zksync_queued_job_processor", "zksync_types", "zksync_utils", - "zksync_vk_setup_data_generator_server_fri", "zksync_vlog", ] diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 9d37c2fb5cbe..8d87b727f906 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -87,6 +87,7 @@ zksync_periodic_job = { path = "../core/lib/periodic_job" } zksync_prover_dal = { path = "crates/lib/prover_dal" } zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } +zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } # for `perf` profiling diff --git a/prover/crates/bin/proof_fri_compressor/Cargo.toml b/prover/crates/bin/proof_fri_compressor/Cargo.toml index 6f2d8b6fcc27..936f0cb5100b 100644 --- a/prover/crates/bin/proof_fri_compressor/Cargo.toml +++ b/prover/crates/bin/proof_fri_compressor/Cargo.toml @@ -21,7 +21,7 @@ zksync_utils.workspace = true zksync_core_leftovers.workspace = true zksync_prover_fri_types.workspace = true zksync_queued_job_processor.workspace = true -zksync_vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true zksync_vlog.workspace = true circuit_sequencer_api.workspace = true diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index 067114ca5a6c..c7747b2e45bd 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -23,9 +23,9 @@ use zksync_prover_fri_types::{ get_current_pod_name, AuxOutputWitnessWrapper, FriProofWrapper, }; use zksync_prover_interface::outputs::L1BatchProofForL1; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; diff --git a/prover/crates/bin/prover_fri/Cargo.toml b/prover/crates/bin/prover_fri/Cargo.toml index ea7d77783158..ae7853427e96 100644 --- a/prover/crates/bin/prover_fri/Cargo.toml +++ b/prover/crates/bin/prover_fri/Cargo.toml @@ -22,7 +22,7 @@ zksync_prover_fri_utils.workspace = true zksync_core_leftovers.workspace = true zksync_prover_fri_types.workspace = true zksync_utils.workspace = true -zksync_vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true shivini = { workspace = true, optional = true, features = [ "circuit_definitions", "zksync", @@ -45,4 +45,4 @@ clap = { workspace = true, features = ["derive"] } [features] default = [] -gpu = ["shivini", "zksync_vk_setup_data_generator_server_fri/gpu"] +gpu = ["shivini", "zksync_prover_keystore/gpu"] diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index dc8594cbdc1b..63981fa6c7d6 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ 
b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -29,12 +29,12 @@ pub mod gpu_prover { CircuitWrapper, FriProofWrapper, ProverServiceDataKey, WitnessVectorArtifacts, }; use zksync_prover_fri_utils::region_fetcher::Zone; + use zksync_prover_keystore::{keystore::Keystore, GoldilocksGpuProverSetupData}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, prover_dal::SocketAddress, }; - use zksync_vk_setup_data_server_fri::{keystore::Keystore, GoldilocksGpuProverSetupData}; use crate::{ metrics::METRICS, diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index 2df1b626497f..4de11a68b534 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -20,11 +20,11 @@ use zksync_prover_fri_types::{ CircuitWrapper, FriProofWrapper, ProverJob, ProverServiceDataKey, }; use zksync_prover_fri_utils::fetch_next_circuit; +use zksync_prover_keystore::{keystore::Keystore, GoldilocksProverSetupData}; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, GoldilocksProverSetupData}; use crate::{ metrics::{CircuitLabels, Layer, METRICS}, diff --git a/prover/crates/bin/prover_fri/tests/basic_test.rs b/prover/crates/bin/prover_fri/tests/basic_test.rs index b6d6226e6967..6dc3f5642efa 100644 --- a/prover/crates/bin/prover_fri/tests/basic_test.rs +++ b/prover/crates/bin/prover_fri/tests/basic_test.rs @@ -9,10 +9,10 @@ use zksync_prover_fri::prover_job_processor::Prover; use zksync_prover_fri_types::{ keys::FriCircuitKey, CircuitWrapper, ProverJob, ProverServiceDataKey, }; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; -use zksync_vk_setup_data_server_fri::{ +use zksync_prover_keystore::{ keystore::Keystore, setup_data_generator::generate_setup_data_common, }; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; fn compare_serialized(expected: &T, actual: &T) { let serialized_expected = bincode::serialize(expected).unwrap(); diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml index 57fca6c89796..7c17e845450c 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml @@ -14,36 +14,20 @@ categories.workspace = true name = "key_generator" path = "src/main.rs" -[lib] -name = "zksync_vk_setup_data_server_fri" -path = "src/lib.rs" - [dependencies] zksync_vlog.workspace = true zksync_types.workspace = true -zksync_utils.workspace = true zksync_prover_fri_types.workspace = true +zksync_prover_keystore.workspace = true zkevm_test_harness.workspace = true circuit_definitions = { workspace = true, features = ["log_tracing"] } -shivini = { workspace = true, optional = true } -zksync_config.workspace = true -zksync_env_config.workspace = true anyhow.workspace = true clap = { workspace = true, features = ["derive"] } tracing.workspace = true tracing-subscriber = { workspace = true, features = ["env-filter"] } -serde_json.workspace = true -serde = { workspace = true, features = ["derive"] } -serde_derive.workspace = true -itertools.workspace = true -bincode.workspace = 
true -once_cell.workspace = true toml_edit.workspace = true -md5.workspace = true -sha3.workspace = true -hex.workspace = true indicatif.workspace = true [dev-dependencies] @@ -51,4 +35,4 @@ proptest.workspace = true [features] default = [] -gpu = ["shivini"] +gpu = ["zksync_prover_keystore/gpu"] diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs index fe4d5b2482a4..8c2a17590099 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs @@ -1,8 +1,8 @@ use anyhow::Context; -use zksync_vk_setup_data_server_fri::{ - commitment_utils::generate_commitments, - keystore::Keystore, - vk_commitment_helper::{get_toml_formatted_value, read_contract_toml, write_contract_toml}, +use zksync_prover_keystore::{commitment_utils::generate_commitments, keystore::Keystore}; + +use crate::vk_commitment_helper::{ + get_toml_formatted_value, read_contract_toml, write_contract_toml, }; pub fn read_and_update_contract_toml(keystore: &Keystore, dryrun: bool) -> anyhow::Result<()> { diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs index da86f931b1c2..313678bc5da8 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs @@ -23,13 +23,14 @@ use zksync_prover_fri_types::{ circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, ProverServiceDataKey, }; -use zksync_vk_setup_data_server_fri::{ +use zksync_prover_keystore::{ commitment_utils::generate_commitments, keystore::Keystore, setup_data_generator::{CPUSetupDataGenerator, GPUSetupDataGenerator, SetupDataGenerator}, }; mod commitment_generator; +mod vk_commitment_helper; #[cfg(test)] mod tests; diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs index 39b5f7a44fb8..d704f4e8fb60 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs @@ -6,8 +6,8 @@ use zksync_prover_fri_types::{ }, ProverServiceDataKey, }; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::basic_fri_types::AggregationRound; -use zksync_vk_setup_data_server_fri::keystore::Keystore; fn all_possible_prover_service_data_key() -> impl Strategy { let mut keys = Vec::with_capacity(30); diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs index bf568e06157b..02cbe6e0c4de 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs @@ -2,8 +2,7 @@ use std::{fs, path::PathBuf}; use anyhow::Context as _; use toml_edit::{Document, Item, Value}; - -use crate::utils::core_workspace_dir_or_current_dir; +use zksync_prover_keystore::utils::core_workspace_dir_or_current_dir; pub fn get_toml_formatted_value(string_value: String) -> Item { let mut value = Value::from(string_value); diff --git a/prover/crates/bin/witness_generator/Cargo.toml b/prover/crates/bin/witness_generator/Cargo.toml index 
e86656d15bb4..bb6a44e7eb33 100644 --- a/prover/crates/bin/witness_generator/Cargo.toml +++ b/prover/crates/bin/witness_generator/Cargo.toml @@ -22,7 +22,7 @@ zksync_multivm.workspace = true zksync_object_store.workspace = true zksync_types.workspace = true zksync_utils.workspace = true -zksync_vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true zksync_prover_fri_types.workspace = true zksync_prover_fri_utils.workspace = true zksync_core_leftovers.workspace = true diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs index 2f4494187975..6600b3012496 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation.rs @@ -27,12 +27,12 @@ use zksync_prover_fri_types::{ FriProofWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::LeafAggregationJobMetadata, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::{ metrics::WITNESS_GENERATOR_METRICS, diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 50c955168602..0e304b46cf74 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -14,10 +14,10 @@ use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::commitment_utils::get_cached_commitments; use zksync_queued_job_processor::JobProcessor; use zksync_types::basic_fri_types::AggregationRound; use zksync_utils::wait_for_tasks::ManagedTasks; -use zksync_vk_setup_data_server_fri::commitment_utils::get_cached_commitments; use zksync_vlog::prometheus::PrometheusExporterConfig; use zksync_witness_generator::{ basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, @@ -171,7 +171,10 @@ async fn main() -> anyhow::Result<()> { let witness_generator_task = match round { AggregationRound::BasicCircuits => { + let start = Instant::now(); let vk_commitments = get_cached_commitments(Some(setup_data_path.clone())); + let end = start.elapsed(); + tracing::info!("Calculating commitment took: {:?}", end); assert_eq!( vk_commitments, vk_commitments_in_db, diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs index b6fc6b8f7c65..87835d79e13f 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs @@ -23,12 +23,12 @@ use zksync_prover_fri_types::{ keys::AggregationsKey, FriProofWrapper, }; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::NodeAggregationJobMetadata, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; use crate::{ metrics::WITNESS_GENERATOR_METRICS, diff --git 
a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs index e05a0cc38cf8..c04959b98952 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs @@ -43,11 +43,11 @@ use zksync_prover_fri_types::{ keys::{ClosedFormInputKey, FriCircuitKey}, CircuitWrapper, }; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; use crate::{ metrics::WITNESS_GENERATOR_METRICS, diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs index c389e037ffa6..6e3461150fe2 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler.rs @@ -25,11 +25,11 @@ use zksync_prover_fri_types::{ keys::FriCircuitKey, CircuitWrapper, FriProofWrapper, }; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; -use zksync_vk_setup_data_server_fri::{keystore::Keystore, utils::get_leaf_vk_params}; use crate::{metrics::WITNESS_GENERATOR_METRICS, utils::SchedulerPartialInputWrapper}; diff --git a/prover/crates/bin/witness_vector_generator/Cargo.toml b/prover/crates/bin/witness_vector_generator/Cargo.toml index 278ab2791d0d..e8386c8090a3 100644 --- a/prover/crates/bin/witness_vector_generator/Cargo.toml +++ b/prover/crates/bin/witness_vector_generator/Cargo.toml @@ -22,7 +22,7 @@ zksync_prover_fri_types.workspace = true zksync_core_leftovers.workspace = true zksync_queued_job_processor.workspace = true zksync_vlog.workspace = true -zksync_vk_setup_data_generator_server_fri.workspace = true +zksync_prover_keystore.workspace = true anyhow.workspace = true tracing.workspace = true diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index 800931f5d7cc..f482637c1778 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -17,12 +17,12 @@ use zksync_prover_fri_types::{ use zksync_prover_fri_utils::{ fetch_next_circuit, get_numeric_circuit_id, region_fetcher::Zone, socket_utils::send_assembly, }; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::CircuitIdRoundTuple, protocol_version::ProtocolSemanticVersion, prover_dal::GpuProverInstanceStatus, }; -use zksync_vk_setup_data_server_fri::keystore::Keystore; use crate::metrics::METRICS; diff --git a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs index c6dfec5009f5..dd1ef8404198 100644 --- a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs @@ -1,8 +1,8 @@ use std::fs; use zksync_prover_fri_types::{CircuitWrapper, ProverJob, ProverServiceDataKey}; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; -use 
zksync_vk_setup_data_server_fri::keystore::Keystore; use zksync_witness_vector_generator::generator::WitnessVectorGenerator; #[test] diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml new file mode 100644 index 000000000000..41e9f0244f69 --- /dev/null +++ b/prover/crates/lib/keystore/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "zksync_prover_keystore" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + + +[dependencies] +zksync_types.workspace = true +zksync_utils.workspace = true +zksync_prover_fri_types.workspace = true +zkevm_test_harness.workspace = true +circuit_definitions = { workspace = true, features = ["log_tracing"] } +shivini = { workspace = true, optional = true } +zksync_config.workspace = true +zksync_env_config.workspace = true + +anyhow.workspace = true +tracing.workspace = true +serde_json.workspace = true +serde = { workspace = true, features = ["derive"] } +bincode.workspace = true +once_cell.workspace = true +md5.workspace = true +sha3.workspace = true +hex.workspace = true + +[features] +default = [] +gpu = ["shivini"] diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs b/prover/crates/lib/keystore/src/commitment_utils.rs similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_utils.rs rename to prover/crates/lib/keystore/src/commitment_utils.rs diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs similarity index 99% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs rename to prover/crates/lib/keystore/src/keystore.rs index c683ed3d2965..7ba5a3aaa701 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -475,6 +475,7 @@ impl Keystore { pub fn load_commitments(&self) -> anyhow::Result { Self::load_json_from_file(self.get_base_path().join("commitments.json")) } + pub fn save_commitments(&self, commitments: &VkCommitments) -> anyhow::Result<()> { Self::save_json_pretty(self.get_base_path().join("commitments.json"), &commitments) } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs b/prover/crates/lib/keystore/src/lib.rs similarity index 99% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs rename to prover/crates/lib/keystore/src/lib.rs index 4b66df56f182..7e60e3fa29cd 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/lib.rs +++ b/prover/crates/lib/keystore/src/lib.rs @@ -26,7 +26,6 @@ pub mod commitment_utils; pub mod keystore; pub mod setup_data_generator; pub mod utils; -pub mod vk_commitment_helper; #[derive(Debug, Serialize, Deserialize)] #[serde( diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs b/prover/crates/lib/keystore/src/setup_data_generator.rs similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/setup_data_generator.rs rename to prover/crates/lib/keystore/src/setup_data_generator.rs diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs similarity index 100% rename from prover/crates/bin/vk_setup_data_generator_server_fri/src/utils.rs rename to 
prover/crates/lib/keystore/src/utils.rs

From 85b734664b4306e988da07005860a7ea0fb7d22d Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Wed, 4 Sep 2024 10:44:10 +0200
Subject: [PATCH 015/116] feat: Remove prover db from house keeper (#2795)

This PR is a follow-up to https://github.com/matter-labs/zksync-era/pull/2666, namely removing the prover side from the house keeper.

This PR:
- removes all prover jobs from the house keeper (now in PJM)
- moves core metrics from prover jobs to the L1 batch metrics reporter
- removes the old configuration

With these changes, core and prover are fully decoupled. This will enable removing unnecessary databases across all envs that don't run provers. In addition, core and prover deployments become independent.
---
 Cargo.lock | 12 -
 Cargo.toml | 3 -
 core/bin/zksync_server/src/node_builder.rs | 16 +-
 core/lib/config/src/configs/house_keeper.rs | 25 --
 core/lib/config/src/testonly.rs | 13 --
 core/lib/dal/src/models/mod.rs | 2 +-
 .../src/models/storage_witness_job_info.rs | 78 -------
 core/lib/env_config/src/house_keeper.rs | 31 ---
 core/lib/protobuf_config/src/house_keeper.rs | 63 -----
 .../src/proto/config/house_keeper.proto | 26 +--
 core/node/house_keeper/Cargo.toml | 1 -
 .../house_keeper/src/blocks_state_reporter.rs | 33 ++-
 core/node/house_keeper/src/lib.rs | 2 +-
 core/node/house_keeper/src/metrics.rs | 11 +
 .../archiver/fri_gpu_prover_archiver.rs | 55 -----
 .../archiver/fri_prover_jobs_archiver.rs | 55 -----
 .../house_keeper/src/prover/archiver/mod.rs | 5 -
 core/node/house_keeper/src/prover/metrics.rs | 123 ----------
 core/node/house_keeper/src/prover/mod.rs | 14 --
 .../fri_proof_compressor_queue_reporter.rs | 88 -------
 .../fri_prover_queue_reporter.rs | 144 ------------
 .../fri_witness_generator_queue_reporter.rs | 131 -----------
 .../src/prover/queue_reporter/mod.rs | 7 -
 .../fri_proof_compressor_job_retry_manager.rs | 60 -----
 .../fri_prover_job_retry_manager.rs | 60 -----
 ...ri_witness_generator_jobs_retry_manager.rs | 124 ----------
 .../src/prover/retry_manager/mod.rs | 7 -
 ...waiting_to_queued_fri_witness_job_mover.rs | 127 ----------
 core/node/node_framework/Cargo.toml | 1 -
 .../implementations/layers/house_keeper.rs | 221 +-----------------
 .../src/implementations/layers/pools_layer.rs | 28 +--
 .../src/implementations/resources/pools.rs | 13 --
 etc/env/base/house_keeper.toml | 13 --
 etc/env/file_based/general.yaml | 13 --
 .../lib/prover_dal/src/fri_prover_dal.rs | 27 ---
 35 files changed, 67 insertions(+), 1565 deletions(-)
 delete mode 100644 core/lib/dal/src/models/storage_witness_job_info.rs
 create mode 100644 core/node/house_keeper/src/metrics.rs
 delete mode 100644 core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs
 delete mode 100644 core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs
 delete mode 100644 core/node/house_keeper/src/prover/archiver/mod.rs
 delete mode 100644 core/node/house_keeper/src/prover/metrics.rs
 delete mode 100644 core/node/house_keeper/src/prover/mod.rs
 delete mode 100644 core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs
 delete mode 100644 core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs
 delete mode 100644 core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs
 delete mode 100644 core/node/house_keeper/src/prover/queue_reporter/mod.rs
 delete mode 100644 core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs
 delete mode 100644
core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs delete mode 100644 core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs delete mode 100644 core/node/house_keeper/src/prover/retry_manager/mod.rs delete mode 100644 core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs diff --git a/Cargo.lock b/Cargo.lock index cfa185345280..7c45ba3dad99 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8826,7 +8826,6 @@ dependencies = [ "vise", "zksync_config", "zksync_dal", - "zksync_prover_dal", "zksync_shared_metrics", "zksync_types", ] @@ -9176,7 +9175,6 @@ dependencies = [ "zksync_object_store", "zksync_proof_data_handler", "zksync_protobuf_config", - "zksync_prover_dal", "zksync_queued_job_processor", "zksync_reorg_detector", "zksync_state", @@ -9397,16 +9395,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_prover_dal" -version = "0.1.0" -dependencies = [ - "sqlx", - "strum", - "zksync_basic_types", - "zksync_db_connection", -] - [[package]] name = "zksync_prover_interface" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 7d28cd7fe21b..d244d436b9f5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,8 +79,6 @@ members = [ "core/tests/test_account", "core/tests/loadnext", "core/tests/vm-benchmark", - # Parts of prover workspace that are needed for Core workspace - "prover/crates/lib/prover_dal", ] resolver = "2" @@ -233,7 +231,6 @@ zksync_protobuf_build = "=0.1.0-rc.11" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } -zksync_prover_dal = { version = "0.1.0", path = "prover/crates/lib/prover_dal" } zksync_vlog = { version = "0.1.0", path = "core/lib/vlog" } zksync_vm_interface = { version = "0.1.0", path = "core/lib/vm_interface" } zksync_vm_executor = { version = "0.1.0", path = "core/lib/vm_executor" } diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 6b0315200651..36ee7d990cf9 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -122,7 +122,6 @@ impl MainNodeBuilder { let pools_layer = PoolsLayerBuilder::empty(config, secrets) .with_master(true) .with_replica(true) - .with_prover(true) // Used by house keeper. 
.build(); self.node.add_layer(pools_layer); Ok(self) @@ -446,18 +445,9 @@ impl MainNodeBuilder { fn add_house_keeper_layer(mut self) -> anyhow::Result { let house_keeper_config = try_load_config!(self.configs.house_keeper_config); - let fri_prover_config = try_load_config!(self.configs.prover_config); - let fri_witness_generator_config = try_load_config!(self.configs.witness_generator_config); - let fri_prover_group_config = try_load_config!(self.configs.prover_group_config); - let fri_proof_compressor_config = try_load_config!(self.configs.proof_compressor_config); - - self.node.add_layer(HouseKeeperLayer::new( - house_keeper_config, - fri_prover_config, - fri_witness_generator_config, - fri_prover_group_config, - fri_proof_compressor_config, - )); + + self.node + .add_layer(HouseKeeperLayer::new(house_keeper_config)); Ok(self) } diff --git a/core/lib/config/src/configs/house_keeper.rs b/core/lib/config/src/configs/house_keeper.rs index e1eb13375667..39e304562fa0 100644 --- a/core/lib/config/src/configs/house_keeper.rs +++ b/core/lib/config/src/configs/house_keeper.rs @@ -4,29 +4,4 @@ use serde::Deserialize; #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct HouseKeeperConfig { pub l1_batch_metrics_reporting_interval_ms: u64, - pub gpu_prover_queue_reporting_interval_ms: u64, - pub prover_job_retrying_interval_ms: u64, - pub prover_stats_reporting_interval_ms: u64, - pub witness_job_moving_interval_ms: u64, - pub witness_generator_stats_reporting_interval_ms: u64, - pub witness_generator_job_retrying_interval_ms: u64, - pub prover_db_pool_size: u32, - pub proof_compressor_job_retrying_interval_ms: u64, - pub proof_compressor_stats_reporting_interval_ms: u64, - pub prover_job_archiver_archiving_interval_ms: Option, - pub prover_job_archiver_archive_after_secs: Option, - pub fri_gpu_prover_archiver_archiving_interval_ms: Option, - pub fri_gpu_prover_archiver_archive_after_secs: Option, -} - -impl HouseKeeperConfig { - pub fn prover_job_archiver_params(&self) -> Option<(u64, u64)> { - self.prover_job_archiver_archiving_interval_ms - .zip(self.prover_job_archiver_archive_after_secs) - } - - pub fn fri_gpu_prover_archiver_params(&self) -> Option<(u64, u64)> { - self.fri_gpu_prover_archiver_archiving_interval_ms - .zip(self.fri_gpu_prover_archiver_archive_after_secs) - } } diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 8c713319a5e6..ea27bf8ab3ab 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -630,19 +630,6 @@ impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::house_keeper::HouseKeeperConfig { configs::house_keeper::HouseKeeperConfig { l1_batch_metrics_reporting_interval_ms: self.sample(rng), - gpu_prover_queue_reporting_interval_ms: self.sample(rng), - prover_job_retrying_interval_ms: self.sample(rng), - prover_stats_reporting_interval_ms: self.sample(rng), - witness_job_moving_interval_ms: self.sample(rng), - witness_generator_stats_reporting_interval_ms: self.sample(rng), - prover_db_pool_size: self.sample(rng), - witness_generator_job_retrying_interval_ms: self.sample(rng), - proof_compressor_job_retrying_interval_ms: self.sample(rng), - proof_compressor_stats_reporting_interval_ms: self.sample(rng), - prover_job_archiver_archiving_interval_ms: self.sample(rng), - prover_job_archiver_archive_after_secs: self.sample(rng), - fri_gpu_prover_archiver_archiving_interval_ms: self.sample(rng), - fri_gpu_prover_archiver_archive_after_secs: self.sample(rng), } } } diff --git 
a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs index a9690dcb7993..479649f85092 100644 --- a/core/lib/dal/src/models/mod.rs +++ b/core/lib/dal/src/models/mod.rs @@ -15,7 +15,7 @@ pub mod storage_sync; pub mod storage_tee_proof; pub mod storage_transaction; pub mod storage_verification_request; -pub mod storage_witness_job_info; + #[cfg(test)] mod tests; diff --git a/core/lib/dal/src/models/storage_witness_job_info.rs b/core/lib/dal/src/models/storage_witness_job_info.rs deleted file mode 100644 index 03d1120b7170..000000000000 --- a/core/lib/dal/src/models/storage_witness_job_info.rs +++ /dev/null @@ -1,78 +0,0 @@ -use std::{convert::TryFrom, str::FromStr}; - -use sqlx::types::chrono::{DateTime, NaiveDateTime, NaiveTime, Utc}; -use zksync_types::{ - basic_fri_types::AggregationRound, - prover_dal::{ - JobPosition, WitnessJobInfo, WitnessJobStatus, WitnessJobStatusFailed, - WitnessJobStatusSuccessful, - }, - L1BatchNumber, -}; - -#[derive(sqlx::FromRow)] -pub struct StorageWitnessJobInfo { - pub aggregation_round: i32, - pub l1_batch_number: i64, - pub status: String, - pub error: Option, - pub created_at: NaiveDateTime, - pub updated_at: NaiveDateTime, - pub time_taken: Option, - pub processing_started_at: Option, - pub attempts: i32, -} - -impl From for WitnessJobInfo { - fn from(x: StorageWitnessJobInfo) -> Self { - fn nt2d(nt: NaiveDateTime) -> DateTime { - DateTime::from_naive_utc_and_offset(nt, Utc) - } - - let status = - match WitnessJobStatus::from_str(x.status.as_str()) - .unwrap_or_else(|_| panic!("Unknown value '{}' in witness job status db record.", x.status)) { - WitnessJobStatus::Successful(_) => WitnessJobStatus::Successful(WitnessJobStatusSuccessful { - started_at: - nt2d(x.processing_started_at - .unwrap_or_else(|| panic!( - "Witness job is successful but lacks processing timestamp. Batch:round {}:{} ", - x.l1_batch_number, - x.aggregation_round))), - time_taken: x.time_taken.unwrap() - NaiveTime::from_hms_opt(0,0,0).unwrap() - }), - WitnessJobStatus::Failed(_) => { - let batch = x.l1_batch_number; - let round = x.aggregation_round; - - WitnessJobStatus::Failed( - WitnessJobStatusFailed { - started_at: - nt2d(x.processing_started_at - .unwrap_or_else(|| panic!( - "Witness job is failed but lacks processing timestamp. Batch:round {}:{} ", - x.l1_batch_number, - x.aggregation_round))), - error: - x.error - .unwrap_or_else(|| panic!( - "Witness job failed but lacks error message. 
Batch:round {}:{}", - batch, - round)), - }) - }, - x => x - }; - - WitnessJobInfo { - block_number: L1BatchNumber(x.l1_batch_number as u32), - created_at: nt2d(x.created_at), - updated_at: nt2d(x.updated_at), - status, - position: JobPosition { - aggregation_round: AggregationRound::try_from(x.aggregation_round).unwrap(), - sequence_number: 1, // Witness job 1:1 aggregation round, per block - }, - } - } -} diff --git a/core/lib/env_config/src/house_keeper.rs b/core/lib/env_config/src/house_keeper.rs index 25eeda793937..1a1ff4d27de2 100644 --- a/core/lib/env_config/src/house_keeper.rs +++ b/core/lib/env_config/src/house_keeper.rs @@ -18,21 +18,6 @@ mod tests { fn expected_config() -> HouseKeeperConfig { HouseKeeperConfig { l1_batch_metrics_reporting_interval_ms: 10_000, - gpu_prover_queue_reporting_interval_ms: 10_000, - prover_job_retrying_interval_ms: 10000, - prover_stats_reporting_interval_ms: 5_000, - witness_job_moving_interval_ms: 30_000, - witness_generator_stats_reporting_interval_ms: 10_000, - witness_generator_job_retrying_interval_ms: 30_000, - prover_db_pool_size: 2, - proof_compressor_job_retrying_interval_ms: 30_000, - proof_compressor_stats_reporting_interval_ms: 10_000, - prover_job_archiver_archiving_interval_ms: Some(1_800_000), - prover_job_archiver_archive_after_secs: Some(172_800), - // 24 hours - fri_gpu_prover_archiver_archiving_interval_ms: Some(86_400_000), - // 48 hours - fri_gpu_prover_archiver_archive_after_secs: Some(172_800), } } @@ -41,22 +26,6 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" HOUSE_KEEPER_L1_BATCH_METRICS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_GPU_PROVER_QUEUE_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_PROVER_JOB_RETRYING_INTERVAL_MS="10000" - HOUSE_KEEPER_WITNESS_JOB_MOVING_INTERVAL_MS="30000" - HOUSE_KEEPER_WITNESS_GENERATOR_STATS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_FRI_WITNESS_JOB_MOVING_INTERVAL_MS="40000" - HOUSE_KEEPER_FRI_PROVER_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_FRI_WITNESS_GENERATOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_PROVER_DB_POOL_SIZE="2" - HOUSE_KEEPER_PROVER_STATS_REPORTING_INTERVAL_MS="5000" - HOUSE_KEEPER_PROOF_COMPRESSOR_STATS_REPORTING_INTERVAL_MS="10000" - HOUSE_KEEPER_PROOF_COMPRESSOR_JOB_RETRYING_INTERVAL_MS="30000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVING_INTERVAL_MS="1800000" - HOUSE_KEEPER_PROVER_JOB_ARCHIVER_ARCHIVE_AFTER_SECS="172800" - HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVING_INTERVAL_MS="86400000" - HOUSE_KEEPER_FRI_GPU_PROVER_ARCHIVER_ARCHIVE_AFTER_SECS="172800" "#; lock.set_env(config); diff --git a/core/lib/protobuf_config/src/house_keeper.rs b/core/lib/protobuf_config/src/house_keeper.rs index b6871de853fb..e40fd1b37dc7 100644 --- a/core/lib/protobuf_config/src/house_keeper.rs +++ b/core/lib/protobuf_config/src/house_keeper.rs @@ -12,43 +12,6 @@ impl ProtoRepr for proto::HouseKeeper { &self.l1_batch_metrics_reporting_interval_ms, ) .context("l1_batch_metrics_reporting_interval_ms")?, - gpu_prover_queue_reporting_interval_ms: *required( - &self.gpu_prover_queue_reporting_interval_ms, - ) - .context("gpu_prover_queue_reporting_interval_ms")?, - prover_job_retrying_interval_ms: *required(&self.prover_job_retrying_interval_ms) - .context("prover_job_retrying_interval_ms")?, - prover_stats_reporting_interval_ms: *required(&self.prover_stats_reporting_interval_ms) - .context("prover_stats_reporting_interval_ms")?, - witness_job_moving_interval_ms: 
*required(&self.witness_job_moving_interval_ms) - .context("witness_job_moving_interval_ms")?, - witness_generator_stats_reporting_interval_ms: *required( - &self.witness_generator_stats_reporting_interval_ms, - ) - .context("witness_generator_stats_reporting_interval_ms")?, - prover_db_pool_size: *required(&self.prover_db_pool_size) - .context("prover_db_pool_size")?, - proof_compressor_job_retrying_interval_ms: *required( - &self.proof_compressor_job_retrying_interval_ms, - ) - .context("proof_compressor_job_retrying_interval_ms")?, - witness_generator_job_retrying_interval_ms: *required( - &self.witness_generator_job_retrying_interval_ms, - ) - .context("witness_generator_job_retrying_interval_ms")?, - proof_compressor_stats_reporting_interval_ms: *required( - &self.proof_compressor_stats_reporting_interval_ms, - ) - .context("proof_compressor_stats_reporting_interval_ms")?, - - // TODO(PLA-862): Make these 2 variables required - prover_job_archiver_archiving_interval_ms: self - .prover_job_archiver_archiving_interval_ms, - prover_job_archiver_archive_after_secs: self.prover_job_archiver_archive_after_secs, - fri_gpu_prover_archiver_archiving_interval_ms: self - .fri_gpu_prover_archiver_archiving_interval_ms, - fri_gpu_prover_archiver_archive_after_secs: self - .fri_gpu_prover_archiver_archive_after_secs, }) } @@ -57,32 +20,6 @@ impl ProtoRepr for proto::HouseKeeper { l1_batch_metrics_reporting_interval_ms: Some( this.l1_batch_metrics_reporting_interval_ms, ), - gpu_prover_queue_reporting_interval_ms: Some( - this.gpu_prover_queue_reporting_interval_ms, - ), - prover_job_retrying_interval_ms: Some(this.prover_job_retrying_interval_ms), - prover_stats_reporting_interval_ms: Some(this.prover_stats_reporting_interval_ms), - witness_job_moving_interval_ms: Some(this.witness_job_moving_interval_ms), - witness_generator_stats_reporting_interval_ms: Some( - this.witness_generator_stats_reporting_interval_ms, - ), - witness_generator_job_retrying_interval_ms: Some( - this.witness_generator_job_retrying_interval_ms, - ), - prover_db_pool_size: Some(this.prover_db_pool_size), - proof_compressor_job_retrying_interval_ms: Some( - this.proof_compressor_job_retrying_interval_ms, - ), - proof_compressor_stats_reporting_interval_ms: Some( - this.proof_compressor_stats_reporting_interval_ms, - ), - prover_job_archiver_archiving_interval_ms: this - .prover_job_archiver_archiving_interval_ms, - prover_job_archiver_archive_after_secs: this.prover_job_archiver_archive_after_secs, - fri_gpu_prover_archiver_archiving_interval_ms: this - .fri_gpu_prover_archiver_archiving_interval_ms, - fri_gpu_prover_archiver_archive_after_secs: this - .fri_gpu_prover_archiver_archive_after_secs, } } } diff --git a/core/lib/protobuf_config/src/proto/config/house_keeper.proto b/core/lib/protobuf_config/src/proto/config/house_keeper.proto index dce4af95b809..c3a4ca8ad672 100644 --- a/core/lib/protobuf_config/src/proto/config/house_keeper.proto +++ b/core/lib/protobuf_config/src/proto/config/house_keeper.proto @@ -4,17 +4,17 @@ package zksync.config.house_keeper; message HouseKeeper { optional uint64 l1_batch_metrics_reporting_interval_ms = 1; // required; ms - optional uint64 gpu_prover_queue_reporting_interval_ms = 2; // required; ms - optional uint64 prover_job_retrying_interval_ms = 3; // required; ms - optional uint64 prover_stats_reporting_interval_ms = 4; // required ms - optional uint64 witness_job_moving_interval_ms = 5; // required; ms - optional uint64 witness_generator_stats_reporting_interval_ms = 6; // required; ms 
- optional uint64 witness_generator_job_retrying_interval_ms = 9; // required; ms - optional uint32 prover_db_pool_size = 10; // required - optional uint64 proof_compressor_job_retrying_interval_ms = 12; // required; ms - optional uint64 proof_compressor_stats_reporting_interval_ms = 13; // required; ms - optional uint64 prover_job_archiver_archiving_interval_ms = 14; // optional; ms - optional uint64 prover_job_archiver_archive_after_secs = 15; // optional; seconds - optional uint64 fri_gpu_prover_archiver_archiving_interval_ms = 16; // optional; ms - optional uint64 fri_gpu_prover_archiver_archive_after_secs = 17; // optional; seconds + reserved 2; reserved "gpu_prover_queue_reporting_interval_ms"; + reserved 3; reserved "prover_job_retrying_interval_ms"; + reserved 4; reserved "prover_stats_reporting_interval_ms"; + reserved 5; reserved "witness_job_moving_interval_ms"; + reserved 6; reserved "witness_generator_stats_reporting_interval_ms"; + reserved 9; reserved "witness_generator_job_retrying_interval_ms"; + reserved 10; reserved "prover_db_pool_size"; + reserved 12; reserved "proof_compressor_job_retrying_interval_ms"; + reserved 13; reserved "proof_compressor_stats_reporting_interval_ms"; + reserved 14; reserved "prover_job_archiver_archiving_interval_ms"; + reserved 15; reserved "prover_job_archiver_archive_after_secs"; + reserved 16; reserved "fri_gpu_prover_archiver_archiving_interval_ms"; + reserved 17; reserved "fri_gpu_prover_archiver_archive_after_secs"; } diff --git a/core/node/house_keeper/Cargo.toml b/core/node/house_keeper/Cargo.toml index ed86a713ea25..b2ed3c14c20f 100644 --- a/core/node/house_keeper/Cargo.toml +++ b/core/node/house_keeper/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true vise.workspace = true zksync_dal.workspace = true zksync_shared_metrics.workspace = true -zksync_prover_dal.workspace = true zksync_types.workspace = true zksync_config.workspace = true diff --git a/core/node/house_keeper/src/blocks_state_reporter.rs b/core/node/house_keeper/src/blocks_state_reporter.rs index 5285390a2783..6f85aa0fbb09 100644 --- a/core/node/house_keeper/src/blocks_state_reporter.rs +++ b/core/node/house_keeper/src/blocks_state_reporter.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockL1Stage, BlockStage, L1StageLatencyLabel, APP_METRICS}; -use crate::periodic_job::PeriodicJob; +use crate::{metrics::FRI_PROVER_METRICS, periodic_job::PeriodicJob}; #[derive(Debug)] pub struct L1BatchMetricsReporter { @@ -88,6 +88,37 @@ impl L1BatchMetricsReporter { APP_METRICS.blocks_state_block_eth_stage_latency[&L1StageLatencyLabel::UnexecutedBlock] .set(now.saturating_sub(timestamp)); } + + // proof generation details metrics + let oldest_unpicked_batch = match conn + .proof_generation_dal() + .get_oldest_unpicked_batch() + .await? + { + Some(l1_batch_number) => l1_batch_number.0 as u64, + // if there is no unpicked batch in database, we use sealed batch number as a result + None => { + conn.blocks_dal() + .get_sealed_l1_batch_number() + .await + .unwrap() + .unwrap() + .0 as u64 + } + }; + FRI_PROVER_METRICS + .oldest_unpicked_batch + .set(oldest_unpicked_batch); + + if let Some(l1_batch_number) = conn + .proof_generation_dal() + .get_oldest_not_generated_batch() + .await? 
+        {
+            FRI_PROVER_METRICS
+                .oldest_not_generated_batch
+                .set(l1_batch_number.0 as u64);
+        }
         Ok(())
     }
 }
diff --git a/core/node/house_keeper/src/lib.rs b/core/node/house_keeper/src/lib.rs
index 68d4ad2f8ba4..4e0d1962fc02 100644
--- a/core/node/house_keeper/src/lib.rs
+++ b/core/node/house_keeper/src/lib.rs
@@ -1,3 +1,3 @@
 pub mod blocks_state_reporter;
+mod metrics;
 pub mod periodic_job;
-pub mod prover;
diff --git a/core/node/house_keeper/src/metrics.rs b/core/node/house_keeper/src/metrics.rs
new file mode 100644
index 000000000000..cc1438e35963
--- /dev/null
+++ b/core/node/house_keeper/src/metrics.rs
@@ -0,0 +1,11 @@
+use vise::{Gauge, Metrics};
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "fri_prover")]
+pub(crate) struct FriProverMetrics {
+    pub oldest_unpicked_batch: Gauge<u64>,
+    pub oldest_not_generated_batch: Gauge<u64>,
+}
+
+#[vise::register]
+pub(crate) static FRI_PROVER_METRICS: vise::Global<FriProverMetrics> = vise::Global::new();
diff --git a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs
deleted file mode 100644
index b0f5ff23fe3f..000000000000
--- a/core/node/house_keeper/src/prover/archiver/fri_gpu_prover_archiver.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use std::time::Duration;
-
-use zksync_dal::ConnectionPool;
-use zksync_prover_dal::{Prover, ProverDal};
-
-use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS};
-
-/// `FriGpuProverArchiver` is a task that periodically archives old fri GPU prover records.
-/// The task will archive the `dead` prover records that have not been updated for a certain amount of time.
-/// Note: These components speed up provers, in their absence, queries would become sub optimal.
-#[derive(Debug)]
-pub struct FriGpuProverArchiver {
-    pool: ConnectionPool<Prover>,
-    archiving_interval_ms: u64,
-    archive_prover_after_secs: u64,
-}
-
-impl FriGpuProverArchiver {
-    pub fn new(
-        pool: ConnectionPool<Prover>,
-        archiving_interval_ms: u64,
-        archive_prover_after_secs: u64,
-    ) -> Self {
-        Self {
-            pool,
-            archiving_interval_ms,
-            archive_prover_after_secs,
-        }
-    }
-}
-
-#[async_trait::async_trait]
-impl PeriodicJob for FriGpuProverArchiver {
-    const SERVICE_NAME: &'static str = "FriGpuProverArchiver";
-
-    async fn run_routine_task(&mut self) -> anyhow::Result<()> {
-        let archived_provers = self
-            .pool
-            .connection()
-            .await
-            .unwrap()
-            .fri_gpu_prover_queue_dal()
-            .archive_old_provers(Duration::from_secs(self.archive_prover_after_secs))
-            .await;
-        tracing::info!("Archived {:?} fri gpu prover records", archived_provers);
-        HOUSE_KEEPER_METRICS
-            .gpu_prover_archived
-            .inc_by(archived_provers as u64);
-        Ok(())
-    }
-
-    fn polling_interval_ms(&self) -> u64 {
-        self.archiving_interval_ms
-    }
-}
diff --git a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs b/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs
deleted file mode 100644
index 684c955231cf..000000000000
--- a/core/node/house_keeper/src/prover/archiver/fri_prover_jobs_archiver.rs
+++ /dev/null
@@ -1,55 +0,0 @@
-use std::time::Duration;
-
-use zksync_dal::ConnectionPool;
-use zksync_prover_dal::{Prover, ProverDal};
-
-use crate::{periodic_job::PeriodicJob, prover::metrics::HOUSE_KEEPER_METRICS};
-
-/// `FriProverJobsArchiver` is a task that periodically archives old finalized prover job.
-/// The task will archive the `successful` prover jobs that have been done for a certain amount of time.
-/// Note: These components speed up provers, in their absence, queries would become sub optimal. -#[derive(Debug)] -pub struct FriProverJobsArchiver { - pool: ConnectionPool, - reporting_interval_ms: u64, - archiving_interval_secs: u64, -} - -impl FriProverJobsArchiver { - pub fn new( - pool: ConnectionPool, - reporting_interval_ms: u64, - archiving_interval_secs: u64, - ) -> Self { - Self { - pool, - reporting_interval_ms, - archiving_interval_secs, - } - } -} - -#[async_trait::async_trait] -impl PeriodicJob for FriProverJobsArchiver { - const SERVICE_NAME: &'static str = "FriProverJobsArchiver"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let archived_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_prover_jobs_dal() - .archive_old_jobs(Duration::from_secs(self.archiving_interval_secs)) - .await; - tracing::info!("Archived {:?} fri prover jobs", archived_jobs); - HOUSE_KEEPER_METRICS - .prover_job_archived - .inc_by(archived_jobs as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/archiver/mod.rs b/core/node/house_keeper/src/prover/archiver/mod.rs deleted file mode 100644 index 36b82a7735ce..000000000000 --- a/core/node/house_keeper/src/prover/archiver/mod.rs +++ /dev/null @@ -1,5 +0,0 @@ -mod fri_gpu_prover_archiver; -mod fri_prover_jobs_archiver; - -pub use fri_gpu_prover_archiver::FriGpuProverArchiver; -pub use fri_prover_jobs_archiver::FriProverJobsArchiver; diff --git a/core/node/house_keeper/src/prover/metrics.rs b/core/node/house_keeper/src/prover/metrics.rs deleted file mode 100644 index 7711c9c04a6b..000000000000 --- a/core/node/house_keeper/src/prover/metrics.rs +++ /dev/null @@ -1,123 +0,0 @@ -use vise::{Counter, EncodeLabelSet, EncodeLabelValue, Family, Gauge, LabeledFamily, Metrics}; -use zksync_types::protocol_version::ProtocolSemanticVersion; -#[derive(Debug, Metrics)] -#[metrics(prefix = "house_keeper")] -pub(crate) struct HouseKeeperMetrics { - pub prover_job_archived: Counter, - pub gpu_prover_archived: Counter, -} - -#[vise::register] -pub(crate) static HOUSE_KEEPER_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue)] -#[metrics(rename_all = "snake_case")] -#[allow(dead_code)] -pub enum JobStatus { - Queued, - InProgress, - Successful, - Failed, - SentToServer, - Skipped, -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "prover_fri")] -pub(crate) struct ProverFriMetrics { - pub proof_compressor_requeued_jobs: Counter, - #[metrics(labels = ["type", "protocol_version"])] - pub proof_compressor_jobs: LabeledFamily<(JobStatus, String), Gauge, 2>, - pub proof_compressor_oldest_uncompressed_batch: Gauge, -} - -#[vise::register] -pub(crate) static PROVER_FRI_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, PartialEq, Eq, Hash, EncodeLabelSet)] -pub(crate) struct ProverJobsLabels { - pub r#type: &'static str, - pub circuit_id: String, - pub aggregation_round: String, - pub prover_group_id: String, - pub protocol_version: String, -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "fri_prover")] -pub(crate) struct FriProverMetrics { - pub prover_jobs: Family>, - #[metrics(labels = ["circuit_id", "aggregation_round"])] - pub block_number: LabeledFamily<(String, String), Gauge, 2>, - pub oldest_unpicked_batch: Gauge, - pub oldest_not_generated_batch: Gauge, - #[metrics(labels = ["round"])] - pub oldest_unprocessed_block_by_round: LabeledFamily>, -} - -impl 
FriProverMetrics { - pub fn report_prover_jobs( - &self, - r#type: &'static str, - circuit_id: u8, - aggregation_round: u8, - prover_group_id: u8, - protocol_version: ProtocolSemanticVersion, - amount: u64, - ) { - self.prover_jobs[&ProverJobsLabels { - r#type, - circuit_id: circuit_id.to_string(), - aggregation_round: aggregation_round.to_string(), - prover_group_id: prover_group_id.to_string(), - protocol_version: protocol_version.to_string(), - }] - .set(amount); - } -} - -#[vise::register] -pub(crate) static FRI_PROVER_METRICS: vise::Global = vise::Global::new(); - -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "type", rename_all = "snake_case")] -#[allow(clippy::enum_variant_names)] -pub(crate) enum WitnessType { - WitnessInputsFri, - LeafAggregationJobsFri, - NodeAggregationJobsFri, - RecursionTipJobsFri, - SchedulerJobsFri, -} - -impl From<&str> for WitnessType { - fn from(s: &str) -> Self { - match s { - "witness_inputs_fri" => Self::WitnessInputsFri, - "leaf_aggregations_jobs_fri" => Self::LeafAggregationJobsFri, - "node_aggregations_jobs_fri" => Self::NodeAggregationJobsFri, - "recursion_tip_jobs_fri" => Self::RecursionTipJobsFri, - "scheduler_jobs_fri" => Self::SchedulerJobsFri, - _ => panic!("Invalid witness type"), - } - } -} - -#[derive(Debug, Metrics)] -#[metrics(prefix = "server")] -pub(crate) struct ServerMetrics { - pub prover_fri_requeued_jobs: Counter, - pub requeued_jobs: Family>, - #[metrics(labels = ["type", "round", "protocol_version"])] - pub witness_generator_jobs_by_round: - LabeledFamily<(&'static str, String, String), Gauge, 3>, - #[metrics(labels = ["type", "protocol_version"])] - pub witness_generator_jobs: LabeledFamily<(&'static str, String), Gauge, 2>, - pub leaf_fri_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub node_fri_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub recursion_tip_witness_generator_waiting_to_queued_jobs_transitions: Counter, - pub scheduler_witness_generator_waiting_to_queued_jobs_transitions: Counter, -} - -#[vise::register] -pub(crate) static SERVER_METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/house_keeper/src/prover/mod.rs b/core/node/house_keeper/src/prover/mod.rs deleted file mode 100644 index af315c53cb48..000000000000 --- a/core/node/house_keeper/src/prover/mod.rs +++ /dev/null @@ -1,14 +0,0 @@ -mod archiver; -mod metrics; -mod queue_reporter; -mod retry_manager; -mod waiting_to_queued_fri_witness_job_mover; - -pub use archiver::{FriGpuProverArchiver, FriProverJobsArchiver}; -pub use queue_reporter::{ - FriProofCompressorQueueReporter, FriProverQueueReporter, FriWitnessGeneratorQueueReporter, -}; -pub use retry_manager::{ - FriProofCompressorJobRetryManager, FriProverJobRetryManager, FriWitnessGeneratorJobRetryManager, -}; -pub use waiting_to_queued_fri_witness_job_mover::WaitingToQueuedFriWitnessJobMover; diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs deleted file mode 100644 index c554bf4616d3..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_proof_compressor_queue_reporter.rs +++ /dev/null @@ -1,88 +0,0 @@ -use std::collections::HashMap; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{protocol_version::ProtocolSemanticVersion, prover_dal::JobCountStatistics}; 
- -use crate::{ - periodic_job::PeriodicJob, - prover::metrics::{JobStatus, PROVER_FRI_METRICS}, -}; - -/// `FriProofCompressorQueueReporter` is a task that periodically reports compression jobs status. -/// Note: these values will be used for auto-scaling proof compressor -#[derive(Debug)] -pub struct FriProofCompressorQueueReporter { - reporting_interval_ms: u64, - pool: ConnectionPool, -} - -impl FriProofCompressorQueueReporter { - pub fn new(reporting_interval_ms: u64, pool: ConnectionPool) -> Self { - Self { - reporting_interval_ms, - pool, - } - } - - async fn get_job_statistics( - pool: &ConnectionPool, - ) -> HashMap { - pool.connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .get_jobs_stats() - .await - } -} - -#[async_trait] -impl PeriodicJob for FriProofCompressorQueueReporter { - const SERVICE_NAME: &'static str = "FriProofCompressorQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stats = Self::get_job_statistics(&self.pool).await; - - for (protocol_version, stats) in &stats { - if stats.queued > 0 { - tracing::info!( - "Found {} free {} in progress proof compressor jobs for protocol version {}", - stats.queued, - stats.in_progress, - protocol_version - ); - } - - PROVER_FRI_METRICS.proof_compressor_jobs - [&(JobStatus::Queued, protocol_version.to_string())] - .set(stats.queued as u64); - - PROVER_FRI_METRICS.proof_compressor_jobs - [&(JobStatus::InProgress, protocol_version.to_string())] - .set(stats.in_progress as u64); - } - - let oldest_not_compressed_batch = self - .pool - .connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .get_oldest_not_compressed_batch() - .await; - - if let Some(l1_batch_number) = oldest_not_compressed_batch { - PROVER_FRI_METRICS - .proof_compressor_oldest_uncompressed_batch - .set(l1_batch_number.0 as u64); - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs deleted file mode 100644 index 12dfae86ab46..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_prover_queue_reporter.rs +++ /dev/null @@ -1,144 +0,0 @@ -use async_trait::async_trait; -use zksync_config::configs::fri_prover_group::FriProverGroupConfig; -use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{basic_fri_types::CircuitIdRoundTuple, prover_dal::JobCountStatistics}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::FRI_PROVER_METRICS}; -/// `FriProverQueueReporter` is a task that periodically reports prover jobs status. -/// Note: these values will be used for auto-scaling provers and Witness Vector Generators. 
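For context: every reporter, archiver, and retry manager deleted in this patch implements the same `periodic_job::PeriodicJob` contract, which survives in core/node/house_keeper/src/periodic_job.rs. Below is a minimal sketch of that contract, reconstructed from the trait surface visible in these hunks (`SERVICE_NAME`, `run_routine_task`, `polling_interval_ms`, and a `run(stop_receiver)` driver); the in-repo trait may differ in details.

use std::time::Duration;

use tokio::sync::watch;

#[async_trait::async_trait]
pub trait PeriodicJob: Send + Sync {
    const SERVICE_NAME: &'static str;

    /// One iteration of the job's work.
    async fn run_routine_task(&mut self) -> anyhow::Result<()>;

    /// Pause between iterations, in milliseconds.
    fn polling_interval_ms(&self) -> u64;

    /// Drives the routine task until the stop signal flips to `true`.
    async fn run(mut self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()>
    where
        Self: Sized,
    {
        let mut timer = tokio::time::interval(Duration::from_millis(self.polling_interval_ms()));
        while !*stop_receiver.borrow() {
            tokio::select! {
                _ = timer.tick() => self.run_routine_task().await?,
                _ = stop_receiver.changed() => break,
            }
        }
        tracing::info!("{} is shutting down", Self::SERVICE_NAME);
        Ok(())
    }
}

The node-framework `Task` impls removed further below are thin adapters around this same entry point: each one simply forwards to `(*self).run(stop_receiver.0)`.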
-#[derive(Debug)] -pub struct FriProverQueueReporter { - reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, - config: FriProverGroupConfig, -} - -impl FriProverQueueReporter { - pub fn new( - reporting_interval_ms: u64, - prover_connection_pool: ConnectionPool, - db_connection_pool: ConnectionPool, - config: FriProverGroupConfig, - ) -> Self { - Self { - reporting_interval_ms, - prover_connection_pool, - db_connection_pool, - config, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProverQueueReporter { - const SERVICE_NAME: &'static str = "FriProverQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let mut conn = self.prover_connection_pool.connection().await.unwrap(); - let stats = conn.fri_prover_jobs_dal().get_prover_jobs_stats().await; - - for (protocol_semantic_version, circuit_prover_stats) in stats { - for (tuple, stat) in circuit_prover_stats { - let CircuitIdRoundTuple { - circuit_id, - aggregation_round, - } = tuple; - let JobCountStatistics { - queued, - in_progress, - } = stat; - let group_id = self - .config - .get_group_id_for_circuit_id_and_aggregation_round( - circuit_id, - aggregation_round, - ) - .unwrap_or(u8::MAX); - - FRI_PROVER_METRICS.report_prover_jobs( - "queued", - circuit_id, - aggregation_round, - group_id, - protocol_semantic_version, - queued as u64, - ); - - FRI_PROVER_METRICS.report_prover_jobs( - "in_progress", - circuit_id, - aggregation_round, - group_id, - protocol_semantic_version, - in_progress as u64, - ); - } - } - - let lag_by_circuit_type = conn - .fri_prover_jobs_dal() - .min_unproved_l1_batch_number() - .await; - - for ((circuit_id, aggregation_round), l1_batch_number) in lag_by_circuit_type { - FRI_PROVER_METRICS.block_number - [&(circuit_id.to_string(), aggregation_round.to_string())] - .set(l1_batch_number.0 as u64); - } - - // FIXME: refactor metrics here - - let mut db_conn = self.db_connection_pool.connection().await.unwrap(); - - let oldest_unpicked_batch = match db_conn - .proof_generation_dal() - .get_oldest_unpicked_batch() - .await? - { - Some(l1_batch_number) => l1_batch_number.0 as u64, - // if there is no unpicked batch in database, we use sealed batch number as a result - None => { - db_conn - .blocks_dal() - .get_sealed_l1_batch_number() - .await - .unwrap() - .unwrap() - .0 as u64 - } - }; - FRI_PROVER_METRICS - .oldest_unpicked_batch - .set(oldest_unpicked_batch); - - if let Some(l1_batch_number) = db_conn - .proof_generation_dal() - .get_oldest_not_generated_batch() - .await? 
- { - FRI_PROVER_METRICS - .oldest_not_generated_batch - .set(l1_batch_number.0 as u64); - } - - for aggregation_round in 0..3 { - if let Some(l1_batch_number) = conn - .fri_prover_jobs_dal() - .min_unproved_l1_batch_number_for_aggregation_round(aggregation_round.into()) - .await - { - FRI_PROVER_METRICS.oldest_unprocessed_block_by_round - [&aggregation_round.to_string()] - .set(l1_batch_number.0 as u64); - } - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs b/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs deleted file mode 100644 index cd124dffaf67..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/fri_witness_generator_queue_reporter.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, - prover_dal::JobCountStatistics, -}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -/// `FriWitnessGeneratorQueueReporter` is a task that periodically reports witness generator jobs status. -/// Note: these values will be used for auto-scaling witness generators (Basic, Leaf, Node, Recursion Tip and Scheduler). -#[derive(Debug)] -pub struct FriWitnessGeneratorQueueReporter { - reporting_interval_ms: u64, - pool: ConnectionPool, -} - -impl FriWitnessGeneratorQueueReporter { - pub fn new(pool: ConnectionPool, reporting_interval_ms: u64) -> Self { - Self { - reporting_interval_ms, - pool, - } - } - - async fn get_job_statistics( - &self, - ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { - let mut conn = self.pool.connection().await.unwrap(); - let mut result = HashMap::new(); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::BasicCircuits) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::LeafAggregation) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::NodeAggregation) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::RecursionTip) - .await, - ); - result.extend( - conn.fri_witness_generator_dal() - .get_witness_jobs_stats(AggregationRound::Scheduler) - .await, - ); - - result - } -} - -fn emit_metrics_for_round( - round: AggregationRound, - protocol_version: ProtocolSemanticVersion, - stats: &JobCountStatistics, -) { - if stats.queued > 0 || stats.in_progress > 0 { - tracing::trace!( - "Found {} free and {} in progress {:?} FRI witness generators jobs for protocol version {}", - stats.queued, - stats.in_progress, - round, - protocol_version - ); - } - - SERVER_METRICS.witness_generator_jobs_by_round[&( - "queued", - format!("{:?}", round), - protocol_version.to_string(), - )] - .set(stats.queued as u64); - SERVER_METRICS.witness_generator_jobs_by_round[&( - "in_progress", - format!("{:?}", round), - protocol_version.to_string(), - )] - .set(stats.in_progress as u64); -} - -#[async_trait] -impl PeriodicJob for FriWitnessGeneratorQueueReporter { - const SERVICE_NAME: &'static str = "FriWitnessGeneratorQueueReporter"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let 
stats_for_all_rounds = self.get_job_statistics().await; - let mut aggregated = HashMap::::new(); - for ((round, protocol_version), stats) in stats_for_all_rounds { - emit_metrics_for_round(round, protocol_version, &stats); - - let entry = aggregated.entry(protocol_version).or_default(); - entry.queued += stats.queued; - entry.in_progress += stats.in_progress; - } - - for (protocol_version, stats) in &aggregated { - if stats.queued > 0 || stats.in_progress > 0 { - tracing::trace!( - "Found {} free {} in progress witness generators jobs for protocol version {}", - stats.queued, - stats.in_progress, - protocol_version - ); - } - - SERVER_METRICS.witness_generator_jobs[&("queued", protocol_version.to_string())] - .set(stats.queued as u64); - - SERVER_METRICS.witness_generator_jobs[&("in_progress", protocol_version.to_string())] - .set(stats.in_progress as u64); - } - - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.reporting_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/queue_reporter/mod.rs b/core/node/house_keeper/src/prover/queue_reporter/mod.rs deleted file mode 100644 index 9eba45320988..000000000000 --- a/core/node/house_keeper/src/prover/queue_reporter/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod fri_proof_compressor_queue_reporter; -mod fri_prover_queue_reporter; -mod fri_witness_generator_queue_reporter; - -pub use fri_proof_compressor_queue_reporter::FriProofCompressorQueueReporter; -pub use fri_prover_queue_reporter::FriProverQueueReporter; -pub use fri_witness_generator_queue_reporter::FriWitnessGeneratorQueueReporter; diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs deleted file mode 100644 index 4d4d8ceed75e..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_proof_compressor_job_retry_manager.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::time::Duration; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::PROVER_FRI_METRICS}; - -/// `FriProofCompressorJobRetryManager` is a task that periodically queues stuck compressor jobs. 
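The three retry managers deleted next (for compressor, prover, and witness-generator jobs) share one policy: periodically ask the DAL to move jobs that have sat in `in_progress` longer than a processing timeout back to `queued`, up to a retry budget, and log each requeued job. A dependency-free sketch of that policy follows; `Job` and `requeue_stuck_jobs` are illustrative stand-ins for the prover DAL and its `requeue_stuck_jobs`-style queries, not real repo APIs.

use std::time::Duration;

#[derive(Debug)]
struct Job {
    id: u64,
    attempts: u32,
    in_progress_for: Duration,
}

/// Returns the ids of jobs that exceeded `timeout` while in progress and are
/// still under the attempt budget; the real DAL query also flips their status
/// back to `queued` in the same statement.
fn requeue_stuck_jobs(jobs: &mut Vec<Job>, timeout: Duration, max_attempts: u32) -> Vec<u64> {
    jobs.iter_mut()
        .filter(|job| job.in_progress_for > timeout && job.attempts < max_attempts)
        .map(|job| {
            job.attempts += 1;
            job.id
        })
        .collect()
}

Keeping the timeout and `max_attempts` configurable (as `HouseKeeperConfig` did) lets operators trade retry latency against load on the prover database.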
-#[derive(Debug)] -pub struct FriProofCompressorJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, -} - -impl FriProofCompressorJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeout, - retry_interval_ms, - pool, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProofCompressorJobRetryManager { - const SERVICE_NAME: &'static str = "FriProofCompressorJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_proof_compressor_dal() - .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) - .await; - let job_len = stuck_jobs.len(); - for stuck_job in stuck_jobs { - tracing::info!("re-queuing fri proof compressor job {:?}", stuck_job); - } - PROVER_FRI_METRICS - .proof_compressor_requeued_jobs - .inc_by(job_len as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs deleted file mode 100644 index 755944d21634..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_prover_job_retry_manager.rs +++ /dev/null @@ -1,60 +0,0 @@ -use std::time::Duration; - -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -/// `FriProverJobRetryManager` is a task that periodically queues stuck prover jobs. 
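Each requeue pass also reports how many jobs it touched, using the `vise` define-and-register pattern that appears throughout this patch: a struct deriving `Metrics` plus a `#[vise::register]`'d `Global`. A minimal sketch of that pattern is below; the `demo_job` prefix and field names are invented for illustration.

use vise::{Counter, Gauge, Metrics};

#[derive(Debug, Metrics)]
#[metrics(prefix = "demo_job")]
struct DemoJobMetrics {
    /// Total jobs returned to the queue after getting stuck.
    requeued_jobs: Counter,
    /// Oldest L1 batch that still has unfinished work.
    oldest_unprocessed_batch: Gauge<u64>,
}

#[vise::register]
static DEMO_JOB_METRICS: vise::Global<DemoJobMetrics> = vise::Global::new();

fn report(requeued: usize, oldest_batch: u64) {
    DEMO_JOB_METRICS.requeued_jobs.inc_by(requeued as u64);
    DEMO_JOB_METRICS.oldest_unprocessed_batch.set(oldest_batch);
}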
-#[derive(Debug)] -pub struct FriProverJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, -} - -impl FriProverJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeout: Duration, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeout, - retry_interval_ms, - pool, - } - } -} - -#[async_trait] -impl PeriodicJob for FriProverJobRetryManager { - const SERVICE_NAME: &'static str = "FriProverJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_prover_jobs_dal() - .requeue_stuck_jobs(self.processing_timeout, self.max_attempts) - .await; - let job_len = stuck_jobs.len(); - for stuck_job in stuck_jobs { - tracing::info!("re-queuing fri prover job {:?}", stuck_job); - } - SERVER_METRICS - .prover_fri_requeued_jobs - .inc_by(job_len as u64); - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs b/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs deleted file mode 100644 index b3d990e2754f..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/fri_witness_generator_jobs_retry_manager.rs +++ /dev/null @@ -1,124 +0,0 @@ -use async_trait::async_trait; -use zksync_config::configs::fri_witness_generator::WitnessGenerationTimeouts; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; -use zksync_types::prover_dal::StuckJobs; - -use crate::{ - periodic_job::PeriodicJob, - prover::metrics::{WitnessType, SERVER_METRICS}, -}; - -/// `FriWitnessGeneratorJobRetryManager` is a task that periodically queues stuck prover jobs. 
-#[derive(Debug)] -pub struct FriWitnessGeneratorJobRetryManager { - pool: ConnectionPool, - max_attempts: u32, - processing_timeouts: WitnessGenerationTimeouts, - retry_interval_ms: u64, -} - -impl FriWitnessGeneratorJobRetryManager { - pub fn new( - max_attempts: u32, - processing_timeouts: WitnessGenerationTimeouts, - retry_interval_ms: u64, - pool: ConnectionPool, - ) -> Self { - Self { - max_attempts, - processing_timeouts, - retry_interval_ms, - pool, - } - } - - pub fn emit_telemetry(&self, witness_type: &str, stuck_jobs: &Vec) { - for stuck_job in stuck_jobs { - tracing::info!("re-queuing {:?} {:?}", witness_type, stuck_job); - } - SERVER_METRICS.requeued_jobs[&WitnessType::from(witness_type)] - .inc_by(stuck_jobs.len() as u64); - } - - pub async fn requeue_stuck_witness_inputs_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_basic_jobs(self.processing_timeouts.basic(), self.max_attempts) - .await; - self.emit_telemetry("witness_inputs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_leaf_aggregations_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_leaf_jobs(self.processing_timeouts.leaf(), self.max_attempts) - .await; - self.emit_telemetry("leaf_aggregations_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_node_aggregations_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_node_jobs(self.processing_timeouts.node(), self.max_attempts) - .await; - self.emit_telemetry("node_aggregations_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_recursion_tip_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_recursion_tip_jobs( - self.processing_timeouts.recursion_tip(), - self.max_attempts, - ) - .await; - self.emit_telemetry("recursion_tip_jobs_fri", &stuck_jobs); - } - - pub async fn requeue_stuck_scheduler_jobs(&mut self) { - let stuck_jobs = self - .pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .requeue_stuck_scheduler_jobs(self.processing_timeouts.scheduler(), self.max_attempts) - .await; - self.emit_telemetry("scheduler_jobs_fri", &stuck_jobs); - } -} - -#[async_trait] -impl PeriodicJob for FriWitnessGeneratorJobRetryManager { - const SERVICE_NAME: &'static str = "FriWitnessGeneratorJobRetryManager"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - self.requeue_stuck_witness_inputs_jobs().await; - self.requeue_stuck_leaf_aggregations_jobs().await; - self.requeue_stuck_node_aggregations_jobs().await; - self.requeue_stuck_recursion_tip_jobs().await; - self.requeue_stuck_scheduler_jobs().await; - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.retry_interval_ms - } -} diff --git a/core/node/house_keeper/src/prover/retry_manager/mod.rs b/core/node/house_keeper/src/prover/retry_manager/mod.rs deleted file mode 100644 index 3b4a8b584817..000000000000 --- a/core/node/house_keeper/src/prover/retry_manager/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -mod fri_proof_compressor_job_retry_manager; -mod fri_prover_job_retry_manager; -mod fri_witness_generator_jobs_retry_manager; - -pub use fri_proof_compressor_job_retry_manager::FriProofCompressorJobRetryManager; -pub use fri_prover_job_retry_manager::FriProverJobRetryManager; -pub use 
fri_witness_generator_jobs_retry_manager::FriWitnessGeneratorJobRetryManager; diff --git a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs b/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs deleted file mode 100644 index d4d5edc78eb9..000000000000 --- a/core/node/house_keeper/src/prover/waiting_to_queued_fri_witness_job_mover.rs +++ /dev/null @@ -1,127 +0,0 @@ -use async_trait::async_trait; -use zksync_dal::ConnectionPool; -use zksync_prover_dal::{Prover, ProverDal}; - -use crate::{periodic_job::PeriodicJob, prover::metrics::SERVER_METRICS}; - -#[derive(Debug)] -pub struct WaitingToQueuedFriWitnessJobMover { - job_moving_interval_ms: u64, - pool: ConnectionPool, -} - -impl WaitingToQueuedFriWitnessJobMover { - pub fn new(job_mover_interval_ms: u64, pool: ConnectionPool) -> Self { - Self { - job_moving_interval_ms: job_mover_interval_ms, - pool, - } - } - - async fn move_leaf_aggregation_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_leaf_aggregation_jobs_from_waiting_to_queued() - .await; - let len = l1_batch_numbers.len(); - for (l1_batch_number, circuit_id) in l1_batch_numbers { - tracing::info!( - "Marked fri leaf aggregation job for l1_batch {} and circuit_id {} as queued", - l1_batch_number, - circuit_id - ); - } - - SERVER_METRICS - .node_fri_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(len as u64); - } - - async fn move_node_aggregation_jobs_from_waiting_to_queued(&mut self) -> Vec<(i64, u8, u16)> { - let mut conn = self.pool.connection().await.unwrap(); - let mut jobs = conn - .fri_witness_generator_dal() - .move_depth_zero_node_aggregation_jobs() - .await; - jobs.extend( - conn.fri_witness_generator_dal() - .move_depth_non_zero_node_aggregation_jobs() - .await, - ); - jobs - } - - async fn move_node_aggregation_jobs(&mut self) { - let l1_batch_numbers = self - .move_node_aggregation_jobs_from_waiting_to_queued() - .await; - let len = l1_batch_numbers.len(); - for (l1_batch_number, circuit_id, depth) in l1_batch_numbers { - tracing::info!( - "Marked fri node aggregation job for l1_batch {} and circuit_id {} depth {} as queued", - l1_batch_number, - circuit_id, - depth - ); - } - SERVER_METRICS - .leaf_fri_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(len as u64); - } - - /// Marks recursion tip witness jobs as queued. - /// The trigger condition is all final node proving jobs for the batch have been completed. - async fn move_recursion_tip_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_recursion_tip_jobs_from_waiting_to_queued() - .await; - for l1_batch_number in &l1_batch_numbers { - tracing::info!( - "Marked fri recursion tip witness job for l1_batch {} as queued", - l1_batch_number, - ); - } - SERVER_METRICS - .recursion_tip_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(l1_batch_numbers.len() as u64); - } - - /// Marks scheduler witness jobs as queued. - /// The trigger condition is the recursion tip proving job for the batch has been completed. 
- async fn move_scheduler_jobs(&mut self) { - let mut conn = self.pool.connection().await.unwrap(); - let l1_batch_numbers = conn - .fri_witness_generator_dal() - .move_scheduler_jobs_from_waiting_to_queued() - .await; - for l1_batch_number in &l1_batch_numbers { - tracing::info!( - "Marked fri scheduler witness job for l1_batch {} as queued", - l1_batch_number, - ); - } - SERVER_METRICS - .scheduler_witness_generator_waiting_to_queued_jobs_transitions - .inc_by(l1_batch_numbers.len() as u64); - } -} - -#[async_trait] -impl PeriodicJob for WaitingToQueuedFriWitnessJobMover { - const SERVICE_NAME: &'static str = "WaitingToQueuedFriWitnessJobMover"; - - async fn run_routine_task(&mut self) -> anyhow::Result<()> { - self.move_leaf_aggregation_jobs().await; - self.move_node_aggregation_jobs().await; - self.move_recursion_tip_jobs().await; - self.move_scheduler_jobs().await; - Ok(()) - } - - fn polling_interval_ms(&self) -> u64 { - self.job_moving_interval_ms - } -} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index f9efb22bd610..fe4889225675 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -16,7 +16,6 @@ zksync_vlog.workspace = true zksync_types.workspace = true zksync_health_check.workspace = true zksync_dal.workspace = true -zksync_prover_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true zksync_protobuf_config.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/house_keeper.rs b/core/node/node_framework/src/implementations/layers/house_keeper.rs index 74314320d815..1e2bc568d50f 100644 --- a/core/node/node_framework/src/implementations/layers/house_keeper.rs +++ b/core/node/node_framework/src/implementations/layers/house_keeper.rs @@ -1,20 +1,10 @@ -use zksync_config::configs::{ - fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, - FriProofCompressorConfig, FriProverConfig, FriWitnessGeneratorConfig, -}; +use zksync_config::configs::house_keeper::HouseKeeperConfig; use zksync_house_keeper::{ - blocks_state_reporter::L1BatchMetricsReporter, - periodic_job::PeriodicJob, - prover::{ - FriGpuProverArchiver, FriProofCompressorJobRetryManager, FriProofCompressorQueueReporter, - FriProverJobRetryManager, FriProverJobsArchiver, FriProverQueueReporter, - FriWitnessGeneratorJobRetryManager, FriWitnessGeneratorQueueReporter, - WaitingToQueuedFriWitnessJobMover, - }, + blocks_state_reporter::L1BatchMetricsReporter, periodic_job::PeriodicJob, }; use crate::{ - implementations::resources::pools::{PoolResource, ProverPool, ReplicaPool}, + implementations::resources::pools::{PoolResource, ReplicaPool}, service::StopReceiver, task::{Task, TaskId}, wiring_layer::{WiringError, WiringLayer}, @@ -26,17 +16,12 @@ use crate::{ #[derive(Debug)] pub struct HouseKeeperLayer { house_keeper_config: HouseKeeperConfig, - fri_prover_config: FriProverConfig, - fri_witness_generator_config: FriWitnessGeneratorConfig, - fri_prover_group_config: FriProverGroupConfig, - fri_proof_compressor_config: FriProofCompressorConfig, } #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { pub replica_pool: PoolResource, - pub prover_pool: PoolResource, } #[derive(Debug, IntoContext)] @@ -44,40 +29,12 @@ pub struct Input { pub struct Output { #[context(task)] pub l1_batch_metrics_reporter: L1BatchMetricsReporter, - #[context(task)] - pub fri_prover_job_retry_manager: FriProverJobRetryManager, - #[context(task)] - pub 
fri_witness_generator_job_retry_manager: FriWitnessGeneratorJobRetryManager, - #[context(task)] - pub waiting_to_queued_fri_witness_job_mover: WaitingToQueuedFriWitnessJobMover, - #[context(task)] - pub fri_prover_job_archiver: Option, - #[context(task)] - pub fri_prover_gpu_archiver: Option, - #[context(task)] - pub fri_witness_generator_stats_reporter: FriWitnessGeneratorQueueReporter, - #[context(task)] - pub fri_prover_stats_reporter: FriProverQueueReporter, - #[context(task)] - pub fri_proof_compressor_stats_reporter: FriProofCompressorQueueReporter, - #[context(task)] - pub fri_proof_compressor_job_retry_manager: FriProofCompressorJobRetryManager, } impl HouseKeeperLayer { - pub fn new( - house_keeper_config: HouseKeeperConfig, - fri_prover_config: FriProverConfig, - fri_witness_generator_config: FriWitnessGeneratorConfig, - fri_prover_group_config: FriProverGroupConfig, - fri_proof_compressor_config: FriProofCompressorConfig, - ) -> Self { + pub fn new(house_keeper_config: HouseKeeperConfig) -> Self { Self { house_keeper_config, - fri_prover_config, - fri_witness_generator_config, - fri_prover_group_config, - fri_proof_compressor_config, } } } @@ -94,7 +51,6 @@ impl WiringLayer for HouseKeeperLayer { async fn wire(self, input: Self::Input) -> Result { // Initialize resources let replica_pool = input.replica_pool.get().await?; - let prover_pool = input.prover_pool.get().await?; // Initialize and add tasks let l1_batch_metrics_reporter = L1BatchMetricsReporter::new( @@ -103,78 +59,8 @@ impl WiringLayer for HouseKeeperLayer { replica_pool.clone(), ); - let fri_prover_job_retry_manager = FriProverJobRetryManager::new( - self.fri_prover_config.max_attempts, - self.fri_prover_config.proof_generation_timeout(), - self.house_keeper_config.prover_job_retrying_interval_ms, - prover_pool.clone(), - ); - - let fri_witness_gen_job_retry_manager = FriWitnessGeneratorJobRetryManager::new( - self.fri_witness_generator_config.max_attempts, - self.fri_witness_generator_config - .witness_generation_timeouts(), - self.house_keeper_config - .witness_generator_job_retrying_interval_ms, - prover_pool.clone(), - ); - - let waiting_to_queued_fri_witness_job_mover = WaitingToQueuedFriWitnessJobMover::new( - self.house_keeper_config.witness_job_moving_interval_ms, - prover_pool.clone(), - ); - - let fri_prover_job_archiver = self.house_keeper_config.prover_job_archiver_params().map( - |(archiving_interval, archive_after)| { - FriProverJobsArchiver::new(prover_pool.clone(), archiving_interval, archive_after) - }, - ); - - let fri_prover_gpu_archiver = self - .house_keeper_config - .fri_gpu_prover_archiver_params() - .map(|(archiving_interval, archive_after)| { - FriGpuProverArchiver::new(prover_pool.clone(), archiving_interval, archive_after) - }); - - let fri_witness_generator_stats_reporter = FriWitnessGeneratorQueueReporter::new( - prover_pool.clone(), - self.house_keeper_config - .witness_generator_stats_reporting_interval_ms, - ); - - let fri_prover_stats_reporter = FriProverQueueReporter::new( - self.house_keeper_config.prover_stats_reporting_interval_ms, - prover_pool.clone(), - replica_pool.clone(), - self.fri_prover_group_config, - ); - - let fri_proof_compressor_stats_reporter = FriProofCompressorQueueReporter::new( - self.house_keeper_config - .proof_compressor_stats_reporting_interval_ms, - prover_pool.clone(), - ); - - let fri_proof_compressor_retry_manager = FriProofCompressorJobRetryManager::new( - self.fri_proof_compressor_config.max_attempts, - 
self.fri_proof_compressor_config.generation_timeout(), - self.house_keeper_config - .proof_compressor_job_retrying_interval_ms, - prover_pool.clone(), - ); - Ok(Output { l1_batch_metrics_reporter, - fri_prover_job_retry_manager, - fri_witness_generator_job_retry_manager: fri_witness_gen_job_retry_manager, - waiting_to_queued_fri_witness_job_mover, - fri_prover_job_archiver, - fri_prover_gpu_archiver, - fri_witness_generator_stats_reporter, - fri_prover_stats_reporter, - fri_proof_compressor_stats_reporter, - fri_proof_compressor_job_retry_manager: fri_proof_compressor_retry_manager, }) } } @@ -189,102 +75,3 @@ impl Task for L1BatchMetricsReporter { (*self).run(stop_receiver.0).await } } - -#[async_trait::async_trait] -impl Task for FriProverJobRetryManager { - fn id(&self) -> TaskId { - "fri_prover_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriWitnessGeneratorJobRetryManager { - fn id(&self) -> TaskId { - "fri_witness_generator_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for WaitingToQueuedFriWitnessJobMover { - fn id(&self) -> TaskId { - "waiting_to_queued_fri_witness_job_mover".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriWitnessGeneratorQueueReporter { - fn id(&self) -> TaskId { - "fri_witness_generator_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProverQueueReporter { - fn id(&self) -> TaskId { - "fri_prover_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProofCompressorQueueReporter { - fn id(&self) -> TaskId { - "fri_proof_compressor_queue_reporter".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProofCompressorJobRetryManager { - fn id(&self) -> TaskId { - "fri_proof_compressor_job_retry_manager".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriProverJobsArchiver { - fn id(&self) -> TaskId { - "fri_prover_jobs_archiver".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} - -#[async_trait::async_trait] -impl Task for FriGpuProverArchiver { - fn id(&self) -> TaskId { - "fri_gpu_prover_archiver".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/pools_layer.rs b/core/node/node_framework/src/implementations/layers/pools_layer.rs index 734f6f0ccf69..e03cf40ce12d 100644 --- a/core/node/node_framework/src/implementations/layers/pools_layer.rs +++ b/core/node/node_framework/src/implementations/layers/pools_layer.rs @@ -2,7 +2,7 @@ use 
zksync_config::configs::{DatabaseSecrets, PostgresConfig}; use zksync_dal::{ConnectionPool, Core}; use crate::{ - implementations::resources::pools::{MasterPool, PoolResource, ProverPool, ReplicaPool}, + implementations::resources::pools::{MasterPool, PoolResource, ReplicaPool}, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; @@ -13,7 +13,6 @@ pub struct PoolsLayerBuilder { config: PostgresConfig, with_master: bool, with_replica: bool, - with_prover: bool, secrets: DatabaseSecrets, } @@ -25,7 +24,6 @@ impl PoolsLayerBuilder { config, with_master: false, with_replica: false, - with_prover: false, secrets: database_secrets, } } @@ -42,12 +40,6 @@ impl PoolsLayerBuilder { self } - /// Allows to enable the prover pool. - pub fn with_prover(mut self, with_prover: bool) -> Self { - self.with_prover = with_prover; - self - } - /// Builds the [`PoolsLayer`] with the provided configuration. pub fn build(self) -> PoolsLayer { PoolsLayer { @@ -55,7 +47,6 @@ impl PoolsLayerBuilder { secrets: self.secrets, with_master: self.with_master, with_replica: self.with_replica, - with_prover: self.with_prover, } } } @@ -67,14 +58,12 @@ impl PoolsLayerBuilder { /// /// - `PoolResource::` (if master pool is enabled) /// - `PoolResource::` (if replica pool is enabled) -/// - `PoolResource::` (if prover pool is enabled) #[derive(Debug)] pub struct PoolsLayer { config: PostgresConfig, secrets: DatabaseSecrets, with_master: bool, with_replica: bool, - with_prover: bool, } #[derive(Debug, IntoContext)] @@ -82,7 +71,6 @@ pub struct PoolsLayer { pub struct Output { pub master_pool: Option>, pub replica_pool: Option>, - pub prover_pool: Option>, } #[async_trait::async_trait] @@ -95,7 +83,7 @@ impl WiringLayer for PoolsLayer { } async fn wire(self, _input: Self::Input) -> Result { - if !self.with_master && !self.with_replica && !self.with_prover { + if !self.with_master && !self.with_replica { return Err(WiringError::Configuration( "At least one pool should be enabled".to_string(), )); @@ -137,21 +125,9 @@ impl WiringLayer for PoolsLayer { None }; - let prover_pool = if self.with_prover { - Some(PoolResource::::new( - self.secrets.prover_url()?, - self.config.max_connections()?, - None, - None, - )) - } else { - None - }; - Ok(Output { master_pool, replica_pool, - prover_pool, }) } } diff --git a/core/node/node_framework/src/implementations/resources/pools.rs b/core/node/node_framework/src/implementations/resources/pools.rs index 75f5d22e3570..3c4d1d4712be 100644 --- a/core/node/node_framework/src/implementations/resources/pools.rs +++ b/core/node/node_framework/src/implementations/resources/pools.rs @@ -9,7 +9,6 @@ use std::{ use tokio::sync::Mutex; use zksync_dal::{ConnectionPool, Core}; use zksync_db_connection::connection_pool::ConnectionPoolBuilder; -use zksync_prover_dal::Prover; use zksync_types::url::SensitiveUrl; use crate::resource::Resource; @@ -122,10 +121,6 @@ pub struct MasterPool {} #[non_exhaustive] pub struct ReplicaPool {} -#[derive(Debug, Clone)] -#[non_exhaustive] -pub struct ProverPool {} - pub trait PoolKind: Clone + Sync + Send + 'static { type DbMarker: zksync_db_connection::connection::DbMarker; @@ -147,11 +142,3 @@ impl PoolKind for ReplicaPool { "replica" } } - -impl PoolKind for ProverPool { - type DbMarker = Prover; - - fn kind_str() -> &'static str { - "prover" - } -} diff --git a/etc/env/base/house_keeper.toml b/etc/env/base/house_keeper.toml index 9596f63d062f..6f86561d1c60 100644 --- a/etc/env/base/house_keeper.toml +++ b/etc/env/base/house_keeper.toml @@ -1,15 +1,2 @@ 
[house_keeper] l1_batch_metrics_reporting_interval_ms = 10000 -gpu_prover_queue_reporting_interval_ms = 10000 -witness_generator_stats_reporting_interval_ms = 10000 -witness_job_moving_interval_ms = 40000 -prover_job_retrying_interval_ms = 30000 -witness_generator_job_retrying_interval_ms = 30000 -prover_db_pool_size = 2 -prover_stats_reporting_interval_ms = 50000 -proof_compressor_job_retrying_interval_ms = 30000 -proof_compressor_stats_reporting_interval_ms = 10000 -prover_job_archiver_archiving_interval_ms = 1800000 -prover_job_archiver_archive_after_secs = 172800 -fri_gpu_prover_archiver_archiving_interval_ms = 86400000 -fri_gpu_prover_archiver_archive_after_secs = 172800 \ No newline at end of file diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 064a3b447b9c..864bff15dedf 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -303,19 +303,6 @@ external_price_api_client: house_keeper: l1_batch_metrics_reporting_interval_ms: 10000 - gpu_prover_queue_reporting_interval_ms: 10000 - prover_job_retrying_interval_ms: 30000 - witness_generator_job_retrying_interval_ms: 30000 - witness_generator_stats_reporting_interval_ms: 10000 - witness_job_moving_interval_ms: 40000 - prover_db_pool_size: 2 - prover_stats_reporting_interval_ms: 5000 - proof_compressor_job_retrying_interval_ms: 30000 - proof_compressor_stats_reporting_interval_ms: 10000 - prover_job_archiver_archiving_interval_ms: 1800000 - prover_job_archiver_archive_after_secs: 172800 - fri_gpu_prover_archiver_archiving_interval_ms: 86400000 - fri_gpu_prover_archiver_archive_after_secs: 172800 prometheus: listener_port: 3314 diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index c2dadae58d0b..4e68154290da 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -476,33 +476,6 @@ impl FriProverDal<'_, '_> { } } - pub async fn min_unproved_l1_batch_number_for_aggregation_round( - &mut self, - aggregation_round: AggregationRound, - ) -> Option { - sqlx::query!( - r#" - SELECT - l1_batch_number - FROM - prover_jobs_fri - WHERE - status <> 'skipped' - AND status <> 'successful' - AND aggregation_round = $1 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - "#, - aggregation_round as i16 - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| L1BatchNumber(row.l1_batch_number as u32)) - } - pub async fn update_status(&mut self, id: u32, status: &str) { sqlx::query!( r#" From f26016661e01e5bcbc44c0752dcc033de16f0ed7 Mon Sep 17 00:00:00 2001 From: Vlad Bochok <41153528+vladbochok@users.noreply.github.com> Date: Wed, 4 Sep 2024 11:20:18 +0200 Subject: [PATCH 016/116] chore: Add stage/stage-proofs/testnet/mainnet upgrade calldata (#2436) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
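The generated transactions below carry the protocol version in two forms: the human-readable `protocolVersionSemVer` (`0.24.2`, matching common.json) and the on-chain `packedProtocolVersion`/`newProtocolVersion` (`103079215106`). The packed value is consistent with placing the minor version in the upper bits and the patch in the lower 32 bits; a quick sanity check follows, with the function name invented for illustration rather than taken from the repo.

/// Packs a 0.minor.patch protocol semver the way the values below suggest:
/// minor in bits 32 and up, patch in bits 0..32.
fn pack_protocol_semver(minor: u64, patch: u64) -> u64 {
    (minor << 32) | patch
}

fn main() {
    // 0.24.2 -> 24 * 2^32 + 2 = 103_079_215_106, as in the JSON below.
    assert_eq!(pack_protocol_semver(24, 2), 103_079_215_106);
}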
--------- Co-authored-by: perekopskiy Co-authored-by: perekopskiy <53865202+perekopskiy@users.noreply.github.com> --- .../common.json | 5 ++ .../mainnet/transactions.json | 72 +++++++++++++++++++ .../stage-proofs/transactions.json | 72 +++++++++++++++++++ .../stage/transactions.json | 72 +++++++++++++++++++ .../testnet/transactions.json | 72 +++++++++++++++++++ .../protocol-upgrade/src/transaction.ts | 19 ++++- 6 files changed, 311 insertions(+), 1 deletion(-) create mode 100644 etc/upgrades/1720794961-decentralize-governance/common.json create mode 100644 etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json create mode 100644 etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json create mode 100644 etc/upgrades/1720794961-decentralize-governance/stage/transactions.json create mode 100644 etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json diff --git a/etc/upgrades/1720794961-decentralize-governance/common.json b/etc/upgrades/1720794961-decentralize-governance/common.json new file mode 100644 index 000000000000..655d2c435f59 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/common.json @@ -0,0 +1,5 @@ +{ + "name": "decentralize-governance", + "creationTimestamp": 1720794961, + "protocolVersion": "0.24.2" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json b/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json new file mode 100644 index 000000000000..cd292b92d4ca --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/mainnet/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66ab923f" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0x4d376798Ba8F69cEd59642c3AE8687c7457e855d", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1722520127", + "stmScheduleTransparentOperation": 
"0x2c4319170000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000c2ee6b6af7d616f6e27ce7f4a451aedc2b0f5f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000c2ee6b6af7d616f6e27ce7f4a451aedc2b0f5f5c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c431917000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a000324000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0x4d376798Ba8F69cEd59642c3AE8687c7457e855d", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x32400084C286CF3E17e7B677ea9583e60a000324", + "value": 0, + "data": "0xfc57565f0000000000000000000000000000000000000000000000000000001800000001000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c431917000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a0003240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d941000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b00000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000002000000000000000000000000032400084c286cf3e17e7b677ea9583e60a0003240000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d941000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000004d376798ba8f69ced59642c3ae8687c7457e855d00000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066ab923f0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json b/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json new file mode 100644 index 000000000000..61abc87f040b --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/stage-proofs/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66a375e5" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1721988581", + "stmScheduleTransparentOperation": 
"0x2c4319170000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000925dd0bc14552b0b261ca8a23ad26df9c6f2c8ba0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000020000000000000000000000000925dd0bc14552b0b261ca8a23ad26df9c6f2c8ba0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e500000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e5000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x5BBdEDe0F0bAc61AA64068b60379fe32ecc0F96C", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e500000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000005bbdede0f0bac61aa64068b60379fe32ecc0f96c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a375e50000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json b/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json new file mode 100644 index 000000000000..ff030ae9f0d7 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/stage/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x669123f2" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1720787954", + "stmScheduleTransparentOperation": 
"0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000008b448ac7cd0f18f3d8464e2645575772a26a3b6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000008b448ac7cd0f18f3d8464e2645575772a26a3b6b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef6000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f200000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f2000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x6d6e010A2680E2E5a3b097ce411528b36d880EF6", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f200000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c000000000000000000000000000000000000000000000000000000000669123f20000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json b/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json new file mode 100644 index 000000000000..19187138aec3 --- /dev/null +++ b/etc/upgrades/1720794961-decentralize-governance/testnet/transactions.json @@ -0,0 +1,72 @@ +{ + "proposeUpgradeTx": { + "l2ProtocolUpgradeTx": { + "txType": 0, + "from": "0x0000000000000000000000000000000000000000", + "to": "0x0000000000000000000000000000000000000000", + "gasLimit": 0, + "gasPerPubdataByteLimit": 0, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 0, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0x", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "bootloaderHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "defaultAccountHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "verifier": "0x0000000000000000000000000000000000000000", + "verifierParams": { + "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000", + "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "l1ContractsUpgradeCalldata": "0x", + "postUpgradeCalldata": "0x", + "upgradeTimestamp": { + "type": "BigNumber", + "hex": "0x66a21f2e" + }, + "factoryDeps": [], + "newProtocolVersion": 103079215106, + "newAllowList": "0x0000000000000000000000000000000000000000" + }, + "l1upgradeCalldata": 
"0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "upgradeAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "protocolVersionSemVer": "0.24.2", + "packedProtocolVersion": 103079215106, + "upgradeTimestamp": "1721900846", + "stmScheduleTransparentOperation": 
"0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000004e39e90746a9ee410a8ce173c7b96d3afed444a50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "stmExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000004e39e90746a9ee410a8ce173c7b96d3afed444a50000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000006442e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000001ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000
0000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "scheduleTransparentOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "executeOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "newExecuteChainUpgradeCalldata": "0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef9000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000604fc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [], + "initAddress": "0xF08528979Aedd80cC2cF8d1Ba9396790909Ed7B1", + "initCalldata": "0x08284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000002800000000000000000000000
0000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + }, + "governanceOperation": { + "calls": [ + { + "target": "0x9A6DE0f62Aa270A8bCB1e2610078650D539B1Ef9", + "value": 0, + "data": "0xfc57565f000000000000000000000000000000000000000000000000000000180000000100000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } + ], + "predecessor": "0x0000000000000000000000000000000000000000000000000000000000000000", + "salt": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "legacyScheduleOperation": "0x2c43191700000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e00000000000000000000000000000000000000000000000000000018000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002600000000000000000000000000000000000000000000000000000000000000280000000000000000000000000000000
00000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "legacyExecuteOperation": "0x74da756b0000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000009a6de0f62aa270a8bcb1e2610078650d539b1ef90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000005e4a9f6d94100000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000060000000000000000000000000f08528979aedd80cc2cf8d1ba9396790909ed7b100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000050408284e5700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000048000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000004a000000000000000000000000000000000000000000000000000000000000004c00000000000000000000000000000000000000000000000000000000066a21f2e0000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000260000000000000000000000000000000000000000000000000000000000000028000000000000000000000000000000000000000000000000000000000000002a000000000000000000000000000000000000000000000000000000000000002c000000000000000000000000000000000000000000000000000000000000002e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +} \ No newline at end of file diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index 4aaed4186d75..dfea3a3bfc35 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -4,7 +4,8 @@ import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, GovernanceFactory, - StateTransitionManagerFactory + StateTransitionManagerFactory, + ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; @@ -207,6 +208,19 @@ function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { }; } +function prepareChainAdminCalldata(target: string, data: BytesLike): string { + const call = { + target: target, + value: 0, + data: data + }; + + const chainAdmin = new ChainAdminFactory(); + const calldata = chainAdmin.interface.encodeFunctionData('multicall', [[call], true]); + + return calldata; +} + export function prepareTransparentUpgradeCalldataForNewGovernance( oldProtocolVersion, oldProtocolVersionDeadline, @@ -249,6 +263,8 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( operation: governanceOperation } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); + const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); + const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', [diamondCut]); const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( zksyncAddress, @@ -260,6 +276,7 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( stmExecuteOperation, scheduleTransparentOperation, executeOperation, + newExecuteChainUpgradeCalldata, diamondCut, governanceOperation, legacyScheduleOperation, From b4b07f35697aa4c16326974270a6e0890ef8cdac Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 4 Sep 2024 13:36:58 +0300 Subject: [PATCH 017/116] docs(en): Mention treeless mode in snapshot recovery (#2771) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Mentions treeless mode / tree data fetcher in the snapshot recovery guide. ## Why ❔ Makes the treeless mode more discoverable. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [x] Code has been formatted via `zk fmt` and `zk lint`.

---
 docs/guides/external-node/07_snapshots_recovery.md | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index 99de05ff2c11..dfbc7a5366c5 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -16,7 +16,10 @@ Recovery from a snapshot consists of several parts.
   to take about 1 hour on the mainnet.
 - **Merkle tree** recovery starts once Postgres is fully recovered. Merkle tree recovery can take about 3 hours on the
   mainnet. Ordinarily, Merkle tree recovery is a blocker for node synchronization; i.e., the node will not process
-  blocks newer than the snapshot block until the Merkle tree is recovered.
+  blocks newer than the snapshot block until the Merkle tree is recovered. If the [treeless mode](10_treeless_mode.md)
+  is enabled, tree recovery is not performed, and the node will start catching up blocks immediately after Postgres
+  recovery. This is still true if the tree data fetcher is enabled _together_ with a Merkle tree; tree recovery is
+  asynchronous in this case.
 - Recovering RocksDB-based **VM state cache** is concurrent with Merkle tree recovery and also depends on Postgres
   recovery. It takes about 1 hour on the mainnet. Unlike Merkle tree recovery, VM state cache is not necessary for node
   operation (the node will get the state from Postgres if it is absent), although it considerably speeds up VM
@@ -24,7 +27,8 @@

 After Postgres recovery is completed, the node becomes operational, providing Web3 API etc. It still needs some time to
 catch up executing blocks after the snapshot (i.e., roughly several hours' worth of blocks / transactions). This may take
-order of 1–2 hours on the mainnet. In total, recovery process and catch-up thus should take roughly 5–6 hours.
+on the order of 1–2 hours on the mainnet. In total, the recovery process and catch-up should thus take roughly 5–6 hours
+with a Merkle tree, or 3–4 hours in the treeless mode / with a tree data fetcher.
## Current limitations From da5cafe41b9ab0d4fd64779382987433903b1226 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Wed, 4 Sep 2024 11:16:24 -0300 Subject: [PATCH 018/116] feat(zk_toolbox): Migrate CI unit tests to zk_toolbox (#2759) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Migrate CI unit tests to zk_toolbox --- .github/workflows/ci-core-reusable.yml | 11 +-- .../zk_supervisor/src/commands/contracts.rs | 24 ++++-- .../zk_supervisor/src/commands/test/rust.rs | 76 +++++++++++++------ zk_toolbox/crates/zk_supervisor/src/dals.rs | 55 +------------- .../crates/zk_supervisor/src/defaults.rs | 4 + zk_toolbox/crates/zk_supervisor/src/main.rs | 1 + .../crates/zk_supervisor/src/messages.rs | 2 + 7 files changed, 82 insertions(+), 91 deletions(-) create mode 100644 zk_toolbox/crates/zk_supervisor/src/defaults.rs diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 62bd76dd0efc..c6e3dc31d65e 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -52,22 +52,19 @@ jobs: - name: Init run: | - ci_run zk ci_run run_retried rustup show - ci_run zk run yarn - ci_run zk db setup - ci_run zk compiler all - ci_run zk contract build + ci_run ./bin/zkt + ci_run zk_supervisor contracts - name: Contracts unit tests run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk test rust + ci_run zk_supervisor test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. - ci_run zk f cargo test --release -p vm-benchmark --bench oneshot --bench batch + ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch loadtest: runs-on: [ matterlabs-ci-runner-high-performance ] diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs index 0c635b2b0d34..bab4205cd66f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs @@ -1,16 +1,16 @@ use std::path::PathBuf; -use clap::{Parser, ValueEnum}; +use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; -use strum::EnumIter; use xshell::{cmd, Shell}; use crate::messages::{ MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER, MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, - MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, MSG_BUILD_SYSTEM_CONTRACTS_HELP, - MSG_CONTRACTS_DEPS_SPINNER, MSG_NOTHING_TO_BUILD_MSG, + MSG_BUILDING_TEST_CONTRACTS_SPINNER, MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP, + MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER, + MSG_NOTHING_TO_BUILD_MSG, }; #[derive(Debug, Parser)] @@ -21,6 +21,8 @@ pub struct ContractsArgs { pub l2_contracts: Option, #[clap(long, alias = "sc", help = MSG_BUILD_SYSTEM_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] pub system_contracts: Option, + #[clap(long, alias = "test", help = MSG_BUILD_TEST_CONTRACTS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub test_contracts: Option, } impl ContractsArgs { @@ -28,11 +30,13 @@ impl ContractsArgs { if self.l1_contracts.is_none() && 
self.l2_contracts.is_none() && self.system_contracts.is_none() + && self.test_contracts.is_none() { return vec![ ContractType::L1, ContractType::L2, ContractType::SystemContracts, + ContractType::TestContracts, ]; } @@ -47,17 +51,20 @@ impl ContractsArgs { if self.system_contracts.unwrap_or(false) { contracts.push(ContractType::SystemContracts); } + if self.test_contracts.unwrap_or(false) { + contracts.push(ContractType::TestContracts); + } contracts } } -#[derive(Debug, ValueEnum, EnumIter, strum::Display, PartialEq, Eq, Clone, Copy)] -#[strum(serialize_all = "lowercase")] +#[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ContractType { L1, L2, SystemContracts, + TestContracts, } #[derive(Debug)] @@ -85,6 +92,11 @@ impl ContractBuilder { cmd: "yarn sc build".to_string(), msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), }, + ContractType::TestContracts => Self { + dir: ecosystem.link_to_code.join("etc/contracts-test-data"), + cmd: "yarn build".to_string(), + msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(), + }, } } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index 59c86743291d..ad1318cfa768 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -1,12 +1,16 @@ +use std::{path::Path, str::FromStr}; + use anyhow::Context; -use common::{cmd::Cmd, config::global_config, db::wait_for_db, logger}; +use common::{cmd::Cmd, db::wait_for_db, logger}; use config::EcosystemConfig; +use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; use crate::{ commands::database, - dals::get_test_dals, + dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, + defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ MSG_CARGO_NEXTEST_MISSING_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, @@ -17,16 +21,45 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; let chain = ecosystem .clone() - .load_chain(global_config().chain_name.clone()) + .load_chain(Some(ecosystem.default_chain)) .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let general_config = chain.get_general_config()?; - let postgres = general_config - .postgres_config - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?; + let general_config = chain.get_general_config(); + let link_to_code = ecosystem.link_to_code; + + let (test_server_url, test_prover_url) = if let Ok(general_config) = general_config { + let postgres = general_config + .postgres_config + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?; + + ( + postgres + .test_server_url + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, + postgres + .test_prover_url + .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, + ) + } else { + ( + TEST_DATABASE_SERVER_URL.to_string(), + TEST_DATABASE_PROVER_URL.to_string(), + ) + }; + + let dals = vec![ + Dal { + url: Url::from_str(&test_server_url.clone())?, + path: CORE_DAL_PATH.to_string(), + }, + Dal { + url: Url::from_str(&test_prover_url.clone())?, + path: PROVER_DAL_PATH.to_string(), + }, + ]; - reset_test_databases(shell).await?; + reset_test_databases(shell, &link_to_code, dals).await?; - let _dir_guard = shell.push_dir(&ecosystem.link_to_code); + let _dir_guard = shell.push_dir(&link_to_code); let cmd = if nextest_is_installed(shell)? 
{ logger::info(MSG_USING_CARGO_NEXTEST); @@ -43,18 +76,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { }; let cmd = cmd - .env( - "TEST_DATABASE_URL", - postgres - .test_server_url - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, - ) - .env( - "TEST_PROVER_DATABASE_URL", - postgres - .test_prover_url - .context(MSG_POSTGRES_CONFIG_NOT_FOUND_ERR)?, - ); + .env("TEST_DATABASE_URL", test_server_url) + .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS); @@ -70,9 +93,12 @@ fn nextest_is_installed(shell: &Shell) -> anyhow::Result { Ok(out.contains("cargo-nextest")) } -async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> { +async fn reset_test_databases( + shell: &Shell, + link_to_code: &Path, + dals: Vec, +) -> anyhow::Result<()> { logger::info(MSG_RESETTING_TEST_DATABASES); - let ecosystem = EcosystemConfig::from_file(shell)?; Cmd::new(cmd!( shell, @@ -85,11 +111,11 @@ async fn reset_test_databases(shell: &Shell) -> anyhow::Result<()> { )) .run()?; - for dal in get_test_dals(shell)? { + for dal in dals { let mut url = dal.url.clone(); url.set_path(""); wait_for_db(&url, 3).await?; - database::reset::reset_database(shell, ecosystem.link_to_code.clone(), dal.clone()).await?; + database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } Ok(()) diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs index a8600a2665e6..f9c07585f6dd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs @@ -1,5 +1,3 @@ -use std::str::FromStr; - use anyhow::{anyhow, Context}; use common::config::global_config; use config::{EcosystemConfig, SecretsConfig}; @@ -11,8 +9,8 @@ use crate::{ messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; -const CORE_DAL_PATH: &str = "core/lib/dal"; -const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal"; +pub const CORE_DAL_PATH: &str = "core/lib/dal"; +pub const PROVER_DAL_PATH: &str = "prover/crates/lib/prover_dal"; #[derive(Debug, Clone)] pub struct SelectedDals { @@ -50,10 +48,6 @@ pub fn get_dals( Ok(dals) } -pub fn get_test_dals(shell: &Shell) -> anyhow::Result> { - Ok(vec![get_test_prover_dal(shell)?, get_test_core_dal(shell)?]) -} - pub fn get_prover_dal(shell: &Shell, url: Option) -> anyhow::Result { let url = if let Some(url) = url { Url::parse(&url)? 
@@ -94,51 +88,6 @@ pub fn get_core_dal(shell: &Shell, url: Option) -> anyhow::Result { }) } -pub fn get_test_core_dal(shell: &Shell) -> anyhow::Result { - let general_config = get_general_config(shell)?; - let postgres = general_config - .postgres_config - .context(MSG_DATABASE_MUST_BE_PRESENTED)?; - - let url = Url::from_str( - &postgres - .test_server_url - .clone() - .context(MSG_DATABASE_MUST_BE_PRESENTED)?, - )?; - Ok(Dal { - path: CORE_DAL_PATH.to_string(), - url, - }) -} - -pub fn get_test_prover_dal(shell: &Shell) -> anyhow::Result { - let general_config = get_general_config(shell)?; - let postgres = general_config - .postgres_config - .context(MSG_DATABASE_MUST_BE_PRESENTED)?; - - let url = Url::from_str( - &postgres - .test_prover_url - .clone() - .context(MSG_DATABASE_MUST_BE_PRESENTED)?, - )?; - - Ok(Dal { - path: PROVER_DAL_PATH.to_string(), - url, - }) -} - -fn get_general_config(shell: &Shell) -> anyhow::Result { - let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) - .context(MSG_CHAIN_NOT_FOUND_ERR)?; - chain_config.get_general_config() -} - fn get_secrets(shell: &Shell) -> anyhow::Result { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zk_toolbox/crates/zk_supervisor/src/defaults.rs new file mode 100644 index 000000000000..f4bae739c2d1 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/defaults.rs @@ -0,0 +1,4 @@ +pub const TEST_DATABASE_SERVER_URL: &str = + "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test"; +pub const TEST_DATABASE_PROVER_URL: &str = + "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 6b5bfa46943e..a8722787b5ff 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -21,6 +21,7 @@ use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; mod commands; mod dals; +mod defaults; mod messages; #[derive(Parser, Debug)] diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 17f01e664678..ff9cc104a505 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -112,10 +112,12 @@ pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER: &str = "Building system contracts.."; +pub(super) const MSG_BUILDING_TEST_CONTRACTS_SPINNER: &str = "Building test contracts.."; pub(super) const MSG_BUILDING_CONTRACTS_SUCCESS: &str = "Contracts built successfully"; pub(super) const MSG_BUILD_L1_CONTRACTS_HELP: &str = "Build L1 contracts"; pub(super) const MSG_BUILD_L2_CONTRACTS_HELP: &str = "Build L2 contracts"; pub(super) const MSG_BUILD_SYSTEM_CONTRACTS_HELP: &str = "Build system contracts"; +pub(super) const MSG_BUILD_TEST_CONTRACTS_HELP: &str = "Build test contracts"; // Integration tests related messages pub(super) fn msg_integration_tests_run(external_node: bool) -> String { From e335f4bda8993ffa63cd8792a12796132a9a2f22 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 4 Sep 2024 
17:22:58 +0300 Subject: [PATCH 019/116] test(vm): Test decommitment cost divergence & circuit stats (#2777) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Adds a unit test for decommitment cost divergence. - Restores unit tests depending on circuit stats. ## Why ❔ Increases test coverage; prevents regressions. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../src/versions/vm_fast/tests/block_tip.rs | 72 ++---- .../src/versions/vm_fast/tests/circuits.rs | 11 +- .../src/versions/vm_fast/tests/code_oracle.rs | 11 +- .../vm_fast/tests/get_used_contracts.rs | 62 ++++- .../multivm/src/versions/vm_fast/tests/mod.rs | 8 +- .../src/versions/vm_fast/tests/precompiles.rs | 69 ++---- .../tests/tester/transaction_test_info.rs | 12 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 220 ++++++++++-------- .../src/versions/vm_latest/tracers/utils.rs | 12 +- .../contracts/counter/proxy_counter.sol | 4 + 10 files changed, 263 insertions(+), 218 deletions(-) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 239d40947a67..15af9d868adc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -10,22 +10,18 @@ use zksync_types::{ commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, }; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::versions::vm_fast::tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, +use super::{ + tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tracers::PubdataTracer, - L1BatchEnv, TracerDispatcher, + interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + vm_latest::constants::{ + BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, + BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, + MAX_VM_PUBDATA_PER_BATCH, }, }; @@ -130,7 +126,6 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute // the gas limit - let batch_env = L1BatchEnv { fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), ..default_l1_batch(zksync_types::L1BatchNumber(1)) @@ -143,15 +138,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { .with_l1_batch_env(batch_env) .build(); - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); + let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); 
vm.vm.insert_bytecodes(bytecodes); let txs_data = populate_mimic_calls(test_data.clone()); @@ -163,7 +150,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, calldata: data, value: U256::zero(), - factory_deps: None, + factory_deps: vec![], }, None, ); @@ -173,44 +160,25 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!( !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data + "Transaction {i} wasn't successful for input: {test_data:#?}" ); } - // Now we count how much ergs were spent at the end of the batch + // Now we count how much gas was spent at the end of the batch // It is assumed that the top level frame is the bootloader + vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); + let gas_before = vm.vm.gas_remaining(); - let ergs_before = vm.vm.gas_remaining(); - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - + let result = vm.vm.execute(VmExecutionMode::Batch); assert!( !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.gas_remaining(); - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used + "Batch wasn't successful for input: {test_data:?}" ); + let gas_after = vm.vm.gas_remaining(); + assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); TestStatistics { - max_used_gas: ergs_before - ergs_after, + max_used_gas: gas_before - gas_after, circuit_statistics: result.statistics.circuit_statistic.total() as u64, execution_metrics_size: result.get_execution_metrics(None).size() as u64, } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index c582bd28c882..0270ac35475b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -1,17 +1,16 @@ use zksync_types::{Address, Execute, U256}; +use super::tester::VmTesterBuilder; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; // Checks that estimated number of circuits for simple transfer doesn't differ much // from hardcoded expected value. #[test] fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_random_rich_accounts(1) .with_deployer() @@ -25,12 +24,12 @@ fn test_circuits() { contract_address: Address::random(), calldata: Vec::new(), value: U256::from(1u8), - factory_deps: None, + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let res = vm.vm.inspect((), VmExecutionMode::OneTx); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. 
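For reference, the conversion these circuit tests rely on goes from the fractional `circuit_statistic` values reported by the VM back to approximate precompile cycle counts. Below is a minimal sketch of that arithmetic, based on the `circuit_sequencer_api_1_5_0::geometry_config` API already imported by the precompiles tests in this patch; the helper function itself is illustrative and not part of the patch.

use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config;

// Sketch: a `circuit_statistic` entry stores a fractional number of circuits of
// a given type. Each keccak256 circuit fits a fixed number of precompile cycles,
// so multiplying recovers an approximate cycle count. The input would come from
// `result.statistics.circuit_statistic.keccak256`.
fn approx_keccak256_cycles(keccak256_statistic: f32) -> f32 {
    keccak256_statistic * get_geometry_config().cycles_per_keccak256_circuit as f32
}

Read this way, the `test_keccak` assertion further below checks that at least 1000 keccak256 cycles were recorded for 1000 hash invocations, instead of counting entries in the tracer's precompile cycle history as the previous version of the test did.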
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 29df17d7293c..836603d77d87 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -6,9 +6,12 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + vm_fast::{ + circuits_tracer::CircuitsTracer, + tests::{ + tester::{get_empty_storage, VmTesterBuilder}, + utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, + }, }, }; @@ -209,7 +212,7 @@ fn refunds_in_code_oracle() { if decommit { let (_, is_fresh) = vm.vm.inner.world_diff.decommit_opcode( &mut vm.vm.world, - &mut vm.vm.tracer, + &mut CircuitsTracer::default(), h256_to_u256(normal_zkevm_bytecode_hash), ); assert!(is_fresh); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 85ff4bbf5e9b..3fcef71add07 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -6,7 +6,7 @@ use itertools::Itertools; use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; +use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; use crate::{ @@ -110,7 +110,13 @@ fn inflated_counter_bytecode() -> Vec { counter_bytecode } -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { +#[derive(Debug)] +struct ProxyCounterData { + proxy_counter_address: Address, + counter_bytecode_hash: U256, +} + +fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { let counter_bytecode = inflated_counter_bytecode(); let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); let counter_address = Address::repeat_byte(0x23); @@ -157,27 +163,69 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) .vm .execute_transaction_with_bytecode_compression(increment_tx, true); compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) + let data = ProxyCounterData { + proxy_counter_address: deploy_tx.address, + counter_bytecode_hash, + }; + (vm, data, exec_result) } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); + let (vm, data, exec_result) = execute_proxy_counter(100_000); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); assert!( - decommitted_hashes.contains(&counter_bytecode_hash), + decommitted_hashes.contains(&data.counter_bytecode_hash), "{decommitted_hashes:?}" ); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); + let (mut vm, data, exec_result) = execute_proxy_counter(10_000); assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); assert!( - decommitted_hashes.contains(&counter_bytecode_hash), + decommitted_hashes.contains(&data.counter_bytecode_hash), "{decommitted_hashes:?}" ); + + // Execute another transaction with a successful far call and check that it's still charged for decommitment. + let account = &mut vm.rich_accounts[0]; + let (_, proxy_counter_abi) = read_proxy_counter_contract(); + let increment = proxy_counter_abi.function("increment").unwrap(); + let increment_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: data.proxy_counter_address, + calldata: increment + .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = vm + .vm + .execute_transaction_with_bytecode_compression(increment_tx, true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); + + let proxy_counter_cost_key = StorageKey::new( + AccountTreeId::new(data.proxy_counter_address), + H256::from_low_u64_be(1), + ); + let far_call_cost_log = exec_result + .logs + .storage_logs + .iter() + .find(|log| log.log.key == proxy_counter_cost_key) + .expect("no cost log"); + assert!( + far_call_cost_log.previous_value.is_zero(), + "{far_call_cost_log:?}" + ); + let far_call_cost = h256_to_u256(far_call_cost_log.log.value); + assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs index 9d5b229f23a9..730c573cdcf4 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs @@ -1,9 +1,9 @@ +mod block_tip; mod bootloader; -mod default_aa; -//mod block_tip; FIXME: requires vm metrics mod bytecode_publishing; +mod default_aa; // mod call_tracer; FIXME: requires tracers -// mod circuits; FIXME: requires tracers / circuit stats +mod circuits; mod code_oracle; mod gas_limit; mod get_used_contracts; @@ -11,7 +11,7 @@ mod is_write_initial; mod l1_tx_execution; mod l2_blocks; mod nonce_holder; -// mod precompiles; FIXME: requires tracers / circuit stats +mod precompiles; // mod prestate_tracer; FIXME: is pre-state tracer still relevant? 
mod refunds; mod require_eip712; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index 5bdf0930d558..f77eeb4f126e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,9 +1,9 @@ -use zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; +use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; use zksync_types::{Address, Execute}; +use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_fast::tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -30,25 +30,18 @@ fn test_keccak() { Execute { contract_address: address, calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); + let keccak_count = exec_result.statistics.circuit_statistic.keccak256 + * get_geometry_config().cycles_per_keccak256_circuit as f32; + assert!(keccak_count >= 1000.0, "{keccak_count}"); } #[test] @@ -74,25 +67,18 @@ fn test_sha256() { Execute { contract_address: address, calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - assert!(sha_count >= 1000); + let sha_count = exec_result.statistics.circuit_statistic.sha256 + * get_geometry_config().cycles_per_sha256_circuit as f32; + assert!(sha_count >= 1000.0, "{sha_count}"); } #[test] @@ -110,24 +96,17 @@ fn test_ecrecover() { let tx = account.get_l2_tx_for_execute( Execute { contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, + calldata: vec![], + value: 0.into(), + factory_deps: vec![], }, None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); + let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - assert_eq!(ecrecover_count, 1); + let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover + * get_geometry_config().cycles_per_ecrecover_circuit as f32; + assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); } diff --git 
a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 5b8f0cb0b10f..105bc5f2fd43 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -184,14 +184,22 @@ impl TransactionTestInfo { } // TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug, PartialEq)] +#[derive(Debug)] struct VmStateDump { state: vm2::State>, storage_writes: Vec<((H160, U256), U256)>, events: Box<[vm2::Event]>, } -impl Vm { +impl PartialEq for VmStateDump { + fn eq(&self, other: &Self) -> bool { + self.state == other.state + && self.storage_writes == other.storage_writes + && self.events == other.events + } +} + +impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.state.clone(), diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index d40ea075f19c..4bb570c0275a 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -63,13 +63,15 @@ pub struct Vm { pub(crate) batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, snapshot: Option, - pub(crate) tracer: CircuitsTracer, + #[cfg(test)] + enforced_state_diffs: Option>, } impl Vm { fn run( &mut self, execution_mode: VmExecutionMode, + tracer: &mut CircuitsTracer, track_refunds: bool, ) -> (ExecutionResult, Refunds) { let mut refunds = Refunds { @@ -80,7 +82,7 @@ impl Vm { let mut pubdata_before = self.inner.world_diff.pubdata() as u32; let result = loop { - let hook = match self.inner.run(&mut self.world, &mut self.tracer) { + let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, ExecutionEnd::Reverted(output) => { @@ -91,7 +93,7 @@ impl Vm { } ExecutionEnd::Panicked => { break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { + reason: if self.inner.state.current_frame.gas == 0 { Halt::BootloaderOutOfGas } else { Halt::VMPanic @@ -213,10 +215,7 @@ impl Vm { user_logs: extract_l2tol1logs_from_l1_messenger(&events), l2_to_l1_messages: VmEvent::extract_long_l2_to_l1_messages(&events), published_bytecodes, - state_diffs: self - .compute_state_diffs() - .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) - .collect(), + state_diffs: self.compute_state_diffs(), }; // Save the pubdata for the future initial bootloader memory building @@ -231,7 +230,13 @@ impl Vm { } Hook::PaymasterValidationEntered | Hook::ValidationStepEnded => { /* unused */ } - Hook::DebugLog | Hook::DebugReturnData | Hook::NearCallCatch => { + Hook::DebugLog => { + let (log, log_arg) = self.get_debug_log(); + let last_tx = self.bootloader_state.last_l2_block().txs.last(); + let tx_hash = last_tx.map(|tx| tx.hash); + tracing::trace!(tx = ?tx_hash, "{log}: {log_arg}"); + } + Hook::DebugReturnData | Hook::NearCallCatch => { // These hooks are for debug purposes only } } @@ -249,6 +254,26 @@ impl Vm { .unwrap() } + fn get_debug_log(&self) -> (String, String) { + let hook_params = self.get_hook_params(); + let mut msg = u256_to_h256(hook_params[0]).as_bytes().to_vec(); + // Trim 0 byte padding at the end. 
+ while msg.last() == Some(&0) { + msg.pop(); + } + + let data = hook_params[1]; + let msg = String::from_utf8(msg).expect("Invalid debug message"); + + // For long data, it is better to use hex-encoding for greater readability + let data_str = if data > U256::from(u64::MAX) { + format!("0x{data:x}") + } else { + data.to_string() + }; + (msg, data_str) + } + /// Should only be used when the bootloader is executing (e.g., when handling hooks). pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) @@ -314,10 +339,19 @@ impl Vm { self.write_to_bootloader_heap(memory); } - fn compute_state_diffs(&mut self) -> impl Iterator + '_ { - let storage = &mut self.world.storage; + #[cfg(test)] + pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { + self.enforced_state_diffs = Some(diffs); + } - self.inner.world_diff.get_storage_changes().map( + fn compute_state_diffs(&mut self) -> Vec { + #[cfg(test)] + if let Some(enforced_diffs) = self.enforced_state_diffs.take() { + return enforced_diffs; + } + + let storage = &mut self.world.storage; + let diffs = self.inner.world_diff.get_storage_changes().map( move |((address, key), (initial_value, final_value))| { let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); StateDiffRecord { @@ -334,14 +368,17 @@ impl Vm { final_value, } }, - ) + ); + diffs + .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) + .collect() } pub(crate) fn decommitted_hashes(&self) -> impl Iterator + '_ { self.inner.world_diff.decommitted_hashes() } - fn gas_remaining(&self) -> u32 { + pub(super) fn gas_remaining(&self) -> u32 { self.inner.state.current_frame.gas } } @@ -356,13 +393,15 @@ impl Vm { .hash .into(); - let program_cache = HashMap::from([convert_system_contract_code( + let program_cache = HashMap::from([World::convert_system_contract_code( &system_env.base_system_smart_contracts.default_aa, false, )]); - let (_, bootloader) = - convert_system_contract_code(&system_env.base_system_smart_contracts.bootloader, true); + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); let bootloader_memory = bootloader_initial_memory(&batch_env); let mut inner = VirtualMachine::new( @@ -386,7 +425,7 @@ impl Vm { inner.state.current_frame.aux_heap_size = u32::MAX; inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION; - let mut me = Self { + let mut this = Self { world: World::new(storage, program_cache), inner, gas_for_account_validation: system_env.default_validation_computational_gas_limit, @@ -398,12 +437,11 @@ impl Vm { system_env, batch_env, snapshot: None, - tracer: CircuitsTracer::default(), + #[cfg(test)] + enforced_state_diffs: None, }; - - me.write_to_bootloader_heap(bootloader_memory); - - me + this.write_to_bootloader_heap(bootloader_memory); + this } // visible for testing @@ -465,12 +503,12 @@ impl VmInterface for Vm { track_refunds = true; } - self.tracer = Default::default(); - + let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff.snapshot(); let pubdata_before = self.inner.world_diff.pubdata(); + let gas_before = self.gas_remaining(); - let (result, refunds) = self.run(execution_mode, track_refunds); + let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) && matches!(result, ExecutionResult::Halt { .. 
}); @@ -522,9 +560,8 @@ impl VmInterface for Vm { }; let pubdata_after = self.inner.world_diff.pubdata(); - - let circuit_statistic = self.tracer.circuit_statistic(); - + let circuit_statistic = tracer.circuit_statistic(); + let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { result, logs, @@ -532,8 +569,8 @@ impl VmInterface for Vm { statistics: VmExecutionStatistics { contracts_used: 0, cycles_used: 0, - gas_used: 0, - gas_remaining: self.gas_remaining(), + gas_used: (gas_before - gas_remaining).into(), + gas_remaining, computational_gas_used: 0, total_log_queries: 0, pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, @@ -654,49 +691,56 @@ impl fmt::Debug for Vm { } } -#[derive(Debug, Clone)] +#[derive(Debug)] pub(crate) struct World { pub(crate) storage: S, - // TODO (PLA-1008): Store `Program`s in an LRU cache - program_cache: HashMap>>, + program_cache: HashMap>, pub(crate) bytecode_cache: HashMap>, } -impl World { - fn new(storage: S, program_cache: HashMap>>) -> Self { +impl World { + fn new(storage: S, program_cache: HashMap>) -> Self { Self { storage, program_cache, bytecode_cache: Default::default(), } } -} -impl vm2::World for World { - fn decommit_code(&mut self, hash: U256) -> Vec { - self.decommit(hash) - .code_page() - .as_ref() - .iter() - .flat_map(|u| { - let mut buffer = [0u8; 32]; - u.to_big_endian(&mut buffer); - buffer - }) - .collect() + fn bytecode_to_program(bytecode: &[u8]) -> Program { + Program::new( + decode_program( + &bytecode + .chunks_exact(8) + .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) + .collect::>(), + false, + ), + bytecode + .chunks_exact(32) + .map(U256::from_big_endian) + .collect::>(), + ) } - fn decommit(&mut self, hash: U256) -> Program> { - self.program_cache - .entry(hash) - .or_insert_with(|| { - bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { - self.storage - .load_factory_dep(u256_to_h256(hash)) - .expect("vm tried to decommit nonexistent bytecode") - })) - }) - .clone() + fn convert_system_contract_code( + code: &SystemContractCode, + is_bootloader: bool, + ) -> (U256, Program) { + ( + h256_to_u256(code.hash), + Program::new( + decode_program( + &code + .code + .iter() + .flat_map(|x| x.0.into_iter().rev()) + .collect::>(), + is_bootloader, + ), + code.code.clone(), + ), + ) } } @@ -745,38 +789,30 @@ impl vm2::StorageInterface for World { } } -fn bytecode_to_program>(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) -} +impl vm2::World for World { + fn decommit(&mut self, hash: U256) -> Program { + self.program_cache + .entry(hash) + .or_insert_with(|| { + Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + self.storage + .load_factory_dep(u256_to_h256(hash)) + .expect("vm tried to decommit nonexistent bytecode") + })) + }) + .clone() + } -fn convert_system_contract_code>( - code: &SystemContractCode, - is_bootloader: bool, -) -> (U256, Program) { - ( - h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), - ) + fn decommit_code(&mut self, hash: U256) -> Vec { + self.decommit(hash) + .code_page() + .as_ref() + .iter() + .flat_map(|u| { + let mut buffer = [0u8; 32]; + u.to_big_endian(&mut 
buffer); + buffer + }) + .collect() + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index bad09617b8f0..1ecb75c28071 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -95,7 +95,10 @@ pub(crate) fn get_debug_log( .into_iter() .map(u256_to_h256) .collect(); - let msg = vm_hook_params[0].as_bytes().to_vec(); + let mut msg = vm_hook_params[0].as_bytes().to_vec(); + while msg.last() == Some(&0) { + msg.pop(); + } let data = vm_hook_params[1].as_bytes().to_vec(); let msg = String::from_utf8(msg).expect("Invalid debug message"); @@ -109,10 +112,8 @@ pub(crate) fn get_debug_log( } else { data.to_string() }; - let tx_id = state.vm_local_state.tx_number_in_block; - - format!("Bootloader transaction {}: {} {}", tx_id, msg, data_str) + format!("Bootloader transaction {tx_id}: {msg}: {data_str}") } /// Reads the memory slice represented by the fat pointer. @@ -167,8 +168,7 @@ pub(crate) fn print_debug_if_needed( VmHook::DebugReturnData => get_debug_returndata(memory, latest_returndata_ptr), _ => return, }; - - tracing::trace!("{}", log); + tracing::trace!("{log}"); } pub(crate) fn computational_gas_price( diff --git a/etc/contracts-test-data/contracts/counter/proxy_counter.sol b/etc/contracts-test-data/contracts/counter/proxy_counter.sol index 1c1883cd4c9d..b3bbf9dda93c 100644 --- a/etc/contracts-test-data/contracts/counter/proxy_counter.sol +++ b/etc/contracts-test-data/contracts/counter/proxy_counter.sol @@ -13,10 +13,14 @@ contract ProxyCounter { counter = _counter; } + uint256 lastFarCallCost; + function increment(uint256 x, uint gasToPass) public { while (gasleft() > gasToPass) { // Burn gas so that there's about `gasToPass` left before the external call. } + uint256 gasBefore = gasleft(); counter.increment(x); + lastFarCallCost = gasBefore - gasleft(); } } From 6f38a43a82d12d9974087a45bd83826e74b9f1c3 Mon Sep 17 00:00:00 2001 From: D025 Date: Wed, 4 Sep 2024 17:29:39 +0300 Subject: [PATCH 020/116] chore: enable renovate for gh actions workflow (#2801) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Enable renovate for gh actions workflow ## Why ❔ To automatically update gh actions workflows and pin digests ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
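For a concrete sense of what the `helpers:pinGitHubActionDigests` preset does, here is the before/after shape of a pinned step. The digest shown is the real `actions/checkout` pin that appears in the hunks below, reused purely as an illustration:

```yaml
# Before: a mutable tag; the action's code can change underneath the workflow.
- uses: actions/checkout@v4
# After: pinned to an immutable commit digest, keeping the tag as a comment.
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
```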
--- .github/workflows/build-prover-fri-gpu-gar.yml | 6 +++--- .github/workflows/build-prover-template.yml | 2 +- .github/workflows/build-tee-prover-template.yml | 8 ++++---- .github/workflows/ci-zk-toolbox-reusable.yml | 2 +- .github/workflows/ci.yml | 2 +- .github/workflows/nodejs-license.yaml | 2 +- .github/workflows/protobuf.yaml | 4 ++-- .github/workflows/release-please.yml | 2 +- .github/workflows/release-test-stage.yml | 2 +- .github/workflows/vm-perf-comparison.yml | 2 +- .github/workflows/zk-environment-publish.yml | 2 +- renovate.json | 10 ++-------- 12 files changed, 19 insertions(+), 25 deletions(-) diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar.yml index b13fca82445a..c0ea060b07e9 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar.yml @@ -34,13 +34,13 @@ jobs: gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Set up QEMU - uses: docker/setup-qemu-action@v2 + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Build and push - uses: docker/build-push-action@v4 + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/prover-gpu-fri-gar build-args: | diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 84e1b4f0f5d0..4f3cad7f1d02 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -180,7 +180,7 @@ jobs: - witness-vector-generator steps: - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - name: Login to us-central1 GAR run: | diff --git a/.github/workflows/build-tee-prover-template.yml b/.github/workflows/build-tee-prover-template.yml index 21c7f9340ba0..0e5b80d2e3a2 100644 --- a/.github/workflows/build-tee-prover-template.yml +++ b/.github/workflows/build-tee-prover-template.yml @@ -28,15 +28,15 @@ jobs: IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} runs-on: [matterlabs-ci-runner-high-performance] steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 if: ${{ github.event_name == 'workflow_dispatch' }} with: ref: ${{ github.event.inputs.target_branch }} - - uses: actions/checkout@v4 + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 if: ${{ github.event_name != 'workflow_dispatch' }} - - uses: cachix/install-nix-action@v27 + - uses: cachix/install-nix-action@ba0dd844c9180cbf77aa72a116d6fbc515d0e87b # v27 with: extra_nix_config: | access-tokens = github.com=${{ github.token }} @@ -45,7 +45,7 @@ jobs: sandbox = true - name: Setup Attic cache - uses: ryanccn/attic-action@v0 + uses: ryanccn/attic-action@618a980988d704a7709daeea88526146acd1d45f # v0.2.1 with: endpoint: https://attic.teepot.org/ cache: tee-pot diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 21ffdc0523c9..5f82df646c13 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -249,7 +249,7 @@ jobs: - 
name: Upload logs - uses: actions/upload-artifact@v4 + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 if: always() with: name: logs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index bcafbfc0b6b1..53c169114915 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -31,7 +31,7 @@ jobs: - name: Get changed files id: changed-files - uses: tj-actions/changed-files@v39 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | prover: diff --git a/.github/workflows/nodejs-license.yaml b/.github/workflows/nodejs-license.yaml index b776673e1298..642ded744021 100644 --- a/.github/workflows/nodejs-license.yaml +++ b/.github/workflows/nodejs-license.yaml @@ -47,7 +47,7 @@ jobs: uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Use Node.js - uses: actions/setup-node@v3 + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 18 diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml index d2885f613aa0..9c2c34186701 100644 --- a/.github/workflows/protobuf.yaml +++ b/.github/workflows/protobuf.yaml @@ -33,7 +33,7 @@ jobs: compatibility: runs-on: [ubuntu-22.04-github-hosted-16core] steps: - - uses: mozilla-actions/sccache-action@v0.0.3 + - uses: mozilla-actions/sccache-action@89e9040de88b577a072e3760aaf59f585da083af # v0.0.5 # before - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -69,7 +69,7 @@ jobs: | xargs cat > ./after.binpb # compare - - uses: bufbuild/buf-setup-action@v1 + - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0 with: github_token: ${{ github.token }} - name: buf breaking diff --git a/.github/workflows/release-please.yml b/.github/workflows/release-please.yml index 692a420eed81..4a8f527f45c6 100644 --- a/.github/workflows/release-please.yml +++ b/.github/workflows/release-please.yml @@ -15,7 +15,7 @@ jobs: steps: - name: Run release-please id: release - uses: google-github-actions/release-please-action@v4 + uses: google-github-actions/release-please-action@e4dc86ba9405554aeba3c6bb2d169500e7d3b4ee # v4.1.1 with: token: ${{ secrets.RELEASE_TOKEN }} config-file: .github/release-please/config.json diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 9f921be78292..1da5aa9ac928 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -23,7 +23,7 @@ jobs: - name: Get all test, doc and src files that have changed id: changed-files-yaml - uses: tj-actions/changed-files@v37 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | # TODO: make it more granular, as already implemented in CI workflow diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index db729cbadc07..cfcfff93037f 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -68,7 +68,7 @@ jobs: id: comparison - name: Comment on PR - uses: thollander/actions-comment-pull-request@v2 + uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 with: message: | ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 
7e232475b148..5a08dff178c4 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -36,7 +36,7 @@ jobs: - name: Get changed files id: changed-files-yaml - uses: tj-actions/changed-files@v39 + uses: tj-actions/changed-files@fea790cb660e33aef4bdf07304e28fedd77dfa13 # v39 with: files_yaml: | zk_env: diff --git a/renovate.json b/renovate.json index 055bc3425806..eeccfee848dc 100644 --- a/renovate.json +++ b/renovate.json @@ -1,11 +1,5 @@ { - "enabled": false, - "extends": [ - "config:base", - "helpers:pinGitHubActionDigests" - ], - "enabledManagers": [ - "github-actions" - ], + "extends": ["config:base", "schedule:earlyMondays","helpers:pinGitHubActionDigests"], + "enabledManagers": ["github-actions"], "prCreation": "immediate" } From 35e4cae29314fa98ce356a875e08b3e869a31036 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:42:38 +0300 Subject: [PATCH 021/116] feat(prover): add CLI option to run prover with max allocation (#2794) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add CLI option for prover to limit max allocation of GPU ## Why ❔ To be able to run compressor and prover on one machine. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- prover/Cargo.lock | 30 ++++++++++--------- prover/Cargo.toml | 4 +-- .../src/gpu_prover_job_processor.rs | 12 ++++++-- prover/crates/bin/prover_fri/src/main.rs | 6 ++++ 4 files changed, 34 insertions(+), 18 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 3ac54b477380..f5c8ea5549a6 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -733,9 +733,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c861b4baec895cb8e53b10825407f0844b0eafda2ac79e7f02de95439f0f1e74" +checksum = "252c28bc729eb32a053de0cbd1c8c55b2f51d00ca0c656f30bc70d255c2d8753" dependencies = [ "boojum", "cmake", @@ -1862,9 +1862,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ac97d833b861e32bc0a71d0542bf5c92094f9818c52d65c695227bfa95ffbe3" +checksum = "803be147b389086e33254a6c9fe26a0d1d21a11f9f73181cad06cf5b1beb7d16" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1873,9 +1873,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee6aed60cf09cb6d0b954d74351acb9beb13daab0bacad279691f6b97504b7e6" +checksum = "49f9a3d87f3d45d11bc835e5fc78fe6e3fe243355d435f6b3e794b98df7d3323" dependencies = [ "serde_json", ] @@ -5580,9 +5580,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e5d862287bb883a4cb0bc4f8ea938ba3fdaa5e495f1a59bc3515231017a0e2" +checksum = "331868b8d92ffec8887c17e786632cf0c9bd4750986fc1400a6d1fbf3739cba4" dependencies = [ "bincode", "blake2 0.10.6", @@ 
-7558,13 +7558,15 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82fe099f4f4a2cc8ca8ca591d7619ac00b8054f63b712fa6ceee2b84c6e04c62" +checksum = "ae694dc0ad818e4d45af70b2cf579ff46f1ac938b42ee55543529beb45ba1464" dependencies = [ "bindgen 0.59.2", + "cmake", "crossbeam 0.8.4", "derivative", + "era_cudart_sys", "futures 0.3.30", "futures-locks", "num_cpus", @@ -7572,9 +7574,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73d27e0e4589c7445f5a22e511cb5186e2d205172ca4b26acd7a334b3af9492" +checksum = "f8156dbaf36764409cc93424d43dc86c993601d73f5aa9a5938e6552a14dc2df" dependencies = [ "bit-vec", "cfg-if 1.0.0", @@ -7589,9 +7591,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.4" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cf4c09adf0a84af0d7ded1fd85a2487fef4cbf1cfc1925412717d0eef03dd5a" +checksum = "83975189451bfacfa97dbcce899fde9db15a0c072196a9b92ddfabbe756bab9d" dependencies = [ "circuit_definitions", "zkevm_test_harness", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 8d87b727f906..403314cc13ca 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -61,8 +61,8 @@ circuit_sequencer_api = "=0.150.4" zkevm_test_harness = "=0.150.4" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.4" } -shivini = "=0.150.4" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.6" } +shivini = "=0.150.6" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 63981fa6c7d6..4b11353eac5c 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -5,6 +5,7 @@ pub mod gpu_prover { use anyhow::Context as _; use shivini::{ gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, + ProverContextConfig, }; use tokio::task::JoinHandle; use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; @@ -82,7 +83,15 @@ pub mod gpu_prover { address: SocketAddress, zone: Zone, protocol_version: ProtocolSemanticVersion, + max_allocation: Option, ) -> Self { + let prover_context = match max_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .expect("failed initializing gpu prover context"), + None => ProverContext::create().expect("failed initializing gpu prover context"), + }; Prover { blob_store, public_blob_store, @@ -91,8 +100,7 @@ pub mod gpu_prover { setup_load_mode, circuit_ids_for_round_to_be_proven, witness_vector_queue, - prover_context: ProverContext::create() - .expect("failed initializing gpu prover context"), + prover_context, address, zone, protocol_version, diff --git a/prover/crates/bin/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs index db813394c194..b93eb9c03958 100644 --- a/prover/crates/bin/prover_fri/src/main.rs +++ b/prover/crates/bin/prover_fri/src/main.rs @@ -139,6 +139,7 @@ async fn main() -> anyhow::Result<()> { public_blob_store, 
pool, + opt.max_allocation, notify, ) .await @@ -178,6 +179,7 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + _max_allocation: Option, _init_notifier: Arc, ) -> anyhow::Result>>> { use crate::prover_job_processor::{load_setup_data_cache, Prover}; @@ -213,6 +215,7 @@ async fn get_prover_tasks( public_blob_store: Option>, pool: ConnectionPool, circuit_ids_for_round_to_be_proven: Vec, + max_allocation: Option, init_notifier: Arc, ) -> anyhow::Result>>> { use gpu_prover_job_processor::gpu_prover; @@ -245,6 +248,7 @@ async fn get_prover_tasks( address.clone(), zone.clone(), protocol_version, + max_allocation, ); let producer = shared_witness_vector_queue.clone(); @@ -295,4 +299,6 @@ pub(crate) struct Cli { pub(crate) config_path: Option, #[arg(long)] pub(crate) secrets_path: Option, + #[arg(long)] + pub(crate) max_allocation: Option, } From 4a4d87e6c5ad37598a82cbc377b33ba951869adc Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Wed, 4 Sep 2024 17:52:34 +0300 Subject: [PATCH 022/116] feat(zk_toolbox): Ease requirements, add option to download setup keys (#2784) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Make some of the prerequisites in the prover subcommand optional (checks are enforced only when the prerequisite is needed). Add an option to download setup keys instead of generating them. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
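Before the diff, a quick sketch of how the reworked command is meant to be driven. The flag names and values come from the new `SetupKeysArgs` definition below; the particular combinations shown are illustrative, not taken from the PR:

```bash
# Download precomputed setup keys from a regional GCS bucket (only gcloud is required):
zk_inception prover setup-keys --mode download --region europe

# Or generate the keys locally (requires cmake, clang and a CUDA-capable GPU):
zk_inception prover setup-keys --mode generate
```

Prerequisite checks now run per code path, so the GPU toolchain is only demanded on the `generate` path. (The previous patch's `max_allocation` CLI field should likewise surface as a `--max-allocation` prover flag under clap's default kebab-casing, though that rendering is an inference rather than something shown in the diffs.)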
--- zk_toolbox/crates/common/src/lib.rs | 5 +- zk_toolbox/crates/common/src/prerequisites.rs | 28 +++---- zk_toolbox/crates/zk_inception/README.md | 18 +++- .../src/commands/prover/args/mod.rs | 1 + .../src/commands/prover/args/setup_keys.rs | 53 ++++++++++++ .../zk_inception/src/commands/prover/gcs.rs | 4 +- .../src/commands/prover/generate_sk.rs | 29 ------- .../zk_inception/src/commands/prover/init.rs | 6 +- .../src/commands/prover/init_bellman_cuda.rs | 4 +- .../zk_inception/src/commands/prover/mod.rs | 8 +- .../zk_inception/src/commands/prover/run.rs | 5 +- .../src/commands/prover/setup_keys.rs | 83 +++++++++++++++++++ .../crates/zk_inception/src/messages.rs | 4 + 13 files changed, 188 insertions(+), 60 deletions(-) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs delete mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index fbd6e93eb5d0..5a6f63e3a51f 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -14,6 +14,9 @@ pub mod git; pub mod server; pub mod wallets; -pub use prerequisites::{check_general_prerequisites, check_prover_prequisites}; +pub use prerequisites::{ + check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITES, GPU_PREREQUISITES, + WGET_PREREQUISITES, +}; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 6c437302470d..87ec396d0e63 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -30,15 +30,7 @@ const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { download_link: "https://docs.docker.com/compose/install/", }; -const PROVER_PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "gcloud", - download_link: "https://cloud.google.com/sdk/docs/install", - }, - Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", - }, +pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ Prerequisite { name: "cmake", download_link: "https://cmake.org/download/", @@ -53,7 +45,17 @@ const PROVER_PREREQUISITES: [Prerequisite; 5] = [ }, // CUDA GPU driver ]; -struct Prerequisite { +pub const WGET_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { + name: "wget", + download_link: "https://www.gnu.org/software/wget/", +}]; + +pub const GCLOUD_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { + name: "gcloud", + download_link: "https://cloud.google.com/sdk/docs/install", +}]; + +pub struct Prerequisite { name: &'static str, download_link: &'static str, } @@ -62,11 +64,7 @@ pub fn check_general_prerequisites(shell: &Shell) { check_prerequisites(shell, &PREREQUISITES, true); } -pub fn check_prover_prequisites(shell: &Shell) { - check_prerequisites(shell, &PROVER_PREREQUISITES, false); -} - -fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { +pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { let mut missing_prerequisites = vec![]; for prerequisite in prerequisites { diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 037a7e3fc925..25eeff40247b 100644 --- 
a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -21,7 +21,7 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) -- [`zk_inception prover generate-sk`↴](#zk_inception-prover-generate-sk) +- [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) - [`zk_inception prover run`↴](#zk_inception-prover-run) - [`zk_inception prover init-bellman-cuda`↴](#zk_inception-prover-init-bellman-cuda) - [`zk_inception server`↴](#zk_inception-server) @@ -475,11 +475,21 @@ Initialize prover Possible values: `gcp`, `local` -## `zk_inception prover generate-sk` +## `zk_inception prover setup-keys` -Generate setup keys +Setup keys -**Usage:** `zk_inception prover generate-sk` +**Usage:** `zk_inception prover setup-keys` + +###### **Options:** + +- `--mode` + + Possible values: `download`, `generate` + +- `--region` + + Possible values: `asia`, `europe`, `us` ## `zk_inception prover run` diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs index 66d97d75094c..903ecdb81d91 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs @@ -1,3 +1,4 @@ pub mod init; pub mod init_bellman_cuda; pub mod run; +pub mod setup_keys; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs new file mode 100644 index 000000000000..4839c03eb429 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs @@ -0,0 +1,53 @@ +use clap::{Parser, ValueEnum}; +use common::PromptSelect; +use strum::{EnumIter, IntoEnumIterator}; + +use crate::messages::{MSG_SETUP_KEYS_DOWNLOAD_HELP, MSG_SETUP_KEYS_REGION_PROMPT}; + +#[derive(Debug, Clone, Parser, Default)] +pub struct SetupKeysArgs { + #[clap(long)] + pub region: Option, + #[clap(long)] + pub mode: Option, +} + +#[derive(Debug, Clone)] +pub struct SetupKeysArgsFinal { + pub region: Option, + pub mode: Mode, +} + +#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)] +pub enum Mode { + Download, + Generate, +} + +#[derive(Debug, Clone, ValueEnum, strum::EnumString, EnumIter, PartialEq, Eq, strum::Display)] +pub enum Region { + Us, + Europe, + Asia, +} + +impl SetupKeysArgs { + pub fn fill_values_with_prompt(self) -> SetupKeysArgsFinal { + let mode = self + .mode + .unwrap_or_else(|| PromptSelect::new(MSG_SETUP_KEYS_DOWNLOAD_HELP, Mode::iter()).ask()); + + if mode == Mode::Download { + let region = self.region.unwrap_or_else(|| { + PromptSelect::new(MSG_SETUP_KEYS_REGION_PROMPT, Region::iter()).ask() + }); + + SetupKeysArgsFinal { + region: Some(region), + mode, + } + } else { + SetupKeysArgsFinal { region: None, mode } + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs index 0c76cb10f542..700209f5ffc8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs @@ -1,4 +1,4 @@ -use common::{cmd::Cmd, logger, spinner::Spinner}; +use common::{check_prerequisites, cmd::Cmd, logger,
spinner::Spinner, GCLOUD_PREREQUISITES}; use xshell::{cmd, Shell}; use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; @@ -14,6 +14,8 @@ pub(crate) fn create_gcs_bucket( shell: &Shell, config: ProofStorageGCSCreateBucket, ) -> anyhow::Result { + check_prerequisites(shell, &GCLOUD_PREREQUISITES, false); + let bucket_name = config.bucket_name; let location = config.location; let project_id = config.project_id; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs deleted file mode 100644 index c13d1c3b5e03..000000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/generate_sk.rs +++ /dev/null @@ -1,29 +0,0 @@ -use anyhow::Ok; -use common::{check_prover_prequisites, cmd::Cmd, logger, spinner::Spinner}; -use config::EcosystemConfig; -use xshell::{cmd, Shell}; - -use super::utils::get_link_to_prover; -use crate::messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED}; - -pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { - check_prover_prequisites(shell); - - let ecosystem_config = EcosystemConfig::from_file(shell)?; - let link_to_prover = get_link_to_prover(&ecosystem_config); - shell.change_dir(&link_to_prover); - - let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER); - let cmd = Cmd::new(cmd!( - shell, - "cargo run --features gpu --release --bin key_generator -- - generate-sk-gpu all --recompute-if-missing - --setup-path=data/keys - --path={link_to_prover}/data/keys" - )); - cmd.run()?; - spinner.finish(); - logger::outro(MSG_SK_GENERATED); - - Ok(()) -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index 051fd26801c9..7aadd04bf6b7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -2,12 +2,13 @@ use std::path::PathBuf; use anyhow::Context; use common::{ - check_prover_prequisites, + check_prerequisites, cmd::Cmd, config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, spinner::Spinner, + WGET_PREREQUISITES, }; use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig}; use xshell::{cmd, Shell}; @@ -34,8 +35,6 @@ use crate::{ }; pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> { - check_prover_prequisites(shell); - let ecosystem_config = EcosystemConfig::from_file(shell)?; let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; @@ -115,6 +114,7 @@ fn download_setup_key( general_config: &GeneralConfig, path: &str, ) -> anyhow::Result<()> { + check_prerequisites(shell, &WGET_PREREQUISITES, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_KEY_SPINNER); let compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs index 75535587c42c..5ed1473a33f6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use common::{check_prover_prequisites, cmd::Cmd, git, logger, spinner::Spinner}; +use common::{check_prerequisites, cmd::Cmd, git, logger, spinner::Spinner, GPU_PREREQUISITES}; use 
config::{traits::SaveConfigWithBasePath, EcosystemConfig}; use xshell::{cmd, Shell}; @@ -13,7 +13,7 @@ use crate::{ }; pub(crate) async fn run(shell: &Shell, args: InitBellmanCudaArgs) -> anyhow::Result<()> { - check_prover_prequisites(shell); + check_prerequisites(shell, &GPU_PREREQUISITES, false); let mut ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs index 31c3a02e3806..4fb90dcfd020 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs @@ -2,12 +2,14 @@ use args::{init::ProverInitArgs, init_bellman_cuda::InitBellmanCudaArgs, run::Pr use clap::Subcommand; use xshell::Shell; +use crate::commands::prover::args::setup_keys::SetupKeysArgs; + mod args; mod gcs; -mod generate_sk; mod init; mod init_bellman_cuda; mod run; +mod setup_keys; mod utils; #[derive(Subcommand, Debug)] @@ -16,7 +18,7 @@ pub enum ProverCommands { Init(Box), /// Generate setup keys #[command(alias = "sk")] - GenerateSK, + SetupKeys(SetupKeysArgs), /// Run prover Run(ProverRunArgs), /// Initialize bellman-cuda @@ -27,7 +29,7 @@ pub enum ProverCommands { pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> { match args { ProverCommands::Init(args) => init::run(*args, shell).await, - ProverCommands::GenerateSK => generate_sk::run(shell).await, + ProverCommands::SetupKeys(args) => setup_keys::run(args, shell).await, ProverCommands::Run(args) => run::run(args, shell).await, ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 20ddfea6ac55..a819c3322a89 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use common::{check_prover_prequisites, cmd::Cmd, config::global_config, logger}; +use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; @@ -20,7 +20,6 @@ use crate::messages::{ }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { - check_prover_prequisites(shell); let args = args.fill_values_with_prompt()?; let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain = ecosystem_config @@ -97,6 +96,7 @@ fn run_witness_vector_generator( } fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { + check_prerequisites(shell, &GPU_PREREQUISITES, false); logger::info(MSG_RUNNING_PROVER); let config_path = chain.path_to_general_config(); let secrets_path = chain.path_to_secrets_config(); @@ -113,6 +113,7 @@ fn run_compressor( chain: &ChainConfig, ecosystem: &EcosystemConfig, ) -> anyhow::Result<()> { + check_prerequisites(shell, &GPU_PREREQUISITES, false); logger::info(MSG_RUNNING_COMPRESSOR); let config_path = chain.path_to_general_config(); let secrets_path = chain.path_to_secrets_config(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs new file mode 100644 index 000000000000..09d9f76a47cf --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs @@ -0,0 +1,83 @@ +use anyhow::Ok; +use common::{ + 
check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITES, + GPU_PREREQUISITES, +}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::utils::get_link_to_prover; +use crate::{ + commands::prover::args::setup_keys::{Mode, Region, SetupKeysArgs}, + messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED}, +}; + +pub(crate) async fn run(args: SetupKeysArgs, shell: &Shell) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + if args.mode == Mode::Generate { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + let link_to_prover = get_link_to_prover(&ecosystem_config); + shell.change_dir(&link_to_prover); + + let spinner = Spinner::new(MSG_GENERATING_SK_SPINNER); + let cmd = Cmd::new(cmd!( + shell, + "cargo run --features gpu --release --bin key_generator -- + generate-sk-gpu all --recompute-if-missing + --setup-path=data/keys + --path={link_to_prover}/data/keys" + )); + cmd.run()?; + spinner.finish(); + logger::outro(MSG_SK_GENERATED); + } else { + check_prerequisites(shell, &GCLOUD_PREREQUISITES, false); + + let link_to_setup_keys = get_link_to_prover(&ecosystem_config).join("data/keys"); + let path_to_keys_buckets = + get_link_to_prover(&ecosystem_config).join("setup-data-gpu-keys.json"); + + let region = args.region.expect("Region is not provided"); + + let file = shell + .read_file(path_to_keys_buckets) + .expect("Could not find commitments file in zksync-era"); + let json: serde_json::Value = + serde_json::from_str(&file).expect("Could not parse commitments.json"); + + let bucket = &match region { + Region::Us => json + .get("us") + .expect("Could not find link to US bucket") + .to_string(), + Region::Europe => json + .get("europe") + .expect("Could not find link to Europe bucket") + .to_string(), + Region::Asia => json + .get("asia") + .expect("Could not find link to Asia bucket") + .to_string(), + }; + + let len = bucket.len() - 2usize; + let bucket = &bucket[1..len]; + + let spinner = Spinner::new(&format!( + "Downloading keys from bucket: {} to {:?}", + bucket, link_to_setup_keys + )); + + let cmd = Cmd::new(cmd!( + shell, + "gsutil -m rsync -r {bucket} {link_to_setup_keys}" + )); + cmd.run()?; + spinner.finish(); + logger::outro("Keys are downloaded"); + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 30cb422dfca6..25933d39db30 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -5,6 +5,10 @@ use ethers::{ utils::format_ether, }; +pub(super) const MSG_SETUP_KEYS_DOWNLOAD_HELP: &str = + "Do you want to download the setup keys or generate them?"; +pub(super) const MSG_SETUP_KEYS_REGION_PROMPT: &str = + "From which region you want setup keys to be downloaded?"; /// Common messages pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = From bc0d7d5935c8f5409a8e53f1c04c5141409aef31 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 4 Sep 2024 18:23:39 +0300 Subject: [PATCH 023/116] perf(vm): Fix VM performance regression on CI loadtest (#2782) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixes VM performance regression on CI loadtest introduced in https://github.com/matter-labs/zksync-era/pull/2760. 
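To make the mechanism concrete before the diff: the sketch below mirrors the `Cow`-based borrowing described in the Why section that follows, using simplified stand-in types rather than the real `multivm` definitions — an illustration of the idea, not the crate's actual API:

```rust
use std::borrow::Cow;

// Simplified stand-ins for the real interface types.
#[derive(Clone, Debug)]
struct CompressedBytecodeInfo;

#[derive(Debug)]
enum BytecodeCompressionError {
    BytecodeCompressionFailed,
}

// The result now carries a lifetime: success borrows bytecodes from VM state
// instead of handing out an owned `Vec`.
type BytecodeCompressionResult<'a> =
    Result<Cow<'a, [CompressedBytecodeInfo]>, BytecodeCompressionError>;

struct BootloaderState {
    last_tx_bytecodes: Vec<CompressedBytecodeInfo>,
}

impl BootloaderState {
    // Returning a slice lets callers build `Cow::Borrowed` with `.into()`.
    fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] {
        &self.last_tx_bytecodes
    }

    fn compression_result(&self, failed: bool) -> BytecodeCompressionResult<'_> {
        if failed {
            Err(BytecodeCompressionError::BytecodeCompressionFailed)
        } else {
            Ok(self.get_last_tx_compressed_bytecodes().into())
        }
    }
}

fn main() {
    let state = BootloaderState {
        last_tx_bytecodes: vec![CompressedBytecodeInfo],
    };
    // A sandbox-style caller only checks success, so nothing is cloned;
    // a caller that really needs ownership can still use `.into_owned()`.
    let result = state.compression_result(false).unwrap();
    assert!(matches!(result, Cow::Borrowed(_)));
}
```

The diffs below thread exactly this shape through every VM version: `get_last_tx_compressed_bytecodes` now returns `&[CompressedBytecodeInfo]`, and each `inspect_transaction_with_bytecode_compression` wraps it with `.into()` on the success path.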
## Why ❔ Changes in the VM interface made the VM eagerly clone compressed bytecodes if compression hasn't failed. Compressed bytecodes aren't used during sandboxed VM execution in the API server (the sandbox only checks that compression is successful). For new VMs, bytecodes can be borrowed from the VM state, which is what this PR does using `Cow`. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-core-reusable.yml | 9 ++++--- core/lib/multivm/src/versions/shadow.rs | 2 +- core/lib/multivm/src/versions/vm_1_3_2/vm.rs | 4 +-- .../vm_1_4_1/bootloader_state/state.rs | 6 ++--- core/lib/multivm/src/versions/vm_1_4_1/vm.rs | 7 +++-- .../vm_1_4_2/bootloader_state/state.rs | 6 ++--- core/lib/multivm/src/versions/vm_1_4_2/vm.rs | 7 +++-- .../bootloader_state/state.rs | 6 ++--- .../src/versions/vm_boojum_integration/vm.rs | 7 +++-- .../vm_fast/bootloader_state/state.rs | 6 ++--- core/lib/multivm/src/versions/vm_fast/vm.rs | 12 ++++----- .../vm_latest/bootloader_state/state.rs | 6 ++--- core/lib/multivm/src/versions/vm_latest/vm.rs | 7 +++-- core/lib/multivm/src/versions/vm_m5/vm.rs | 4 +-- core/lib/multivm/src/versions/vm_m6/vm.rs | 4 +-- .../bootloader_state/state.rs | 6 ++--- .../src/versions/vm_refunds_enhancement/vm.rs | 7 +++-- .../bootloader_state/state.rs | 6 ++--- .../src/versions/vm_virtual_blocks/vm.rs | 7 +++-- core/lib/multivm/src/vm_instance.rs | 2 +- core/lib/vm_executor/src/batch/factory.rs | 9 ++++--- .../src/types/errors/bytecode_compression.rs | 5 +++- core/lib/vm_interface/src/vm.rs | 4 +-- core/lib/web3_decl/src/client/mod.rs | 26 ++++++++++++++----- core/tests/loadnext/src/account/mod.rs | 11 +++++--- core/tests/loadnext/src/account_pool.rs | 2 ++ core/tests/loadnext/src/constants.rs | 4 +-- core/tests/loadnext/src/executor.rs | 6 ++--- core/tests/loadnext/src/sdk/ethereum/mod.rs | 1 + core/tests/loadnext/src/sdk/wallet.rs | 1 + 30 files changed, 118 insertions(+), 72 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index c6e3dc31d65e..53bd1ab7a518 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -87,8 +87,10 @@ jobs: - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 22000 || 16000 }} >> .env - echo ACCOUNTS_AMOUNT="150" >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 21000 || 16000 }} >> .env + echo ACCOUNTS_AMOUNT="100" >> .env + echo MAX_INFLIGHT_TXS="10" >> .env + echo SYNC_API_REQUESTS_LIMIT="15" >> .env echo FAIL_FAST=true >> .env echo IN_DOCKER=1 >> .env echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env @@ -112,7 +114,8 @@ jobs: - name: Run server run: | EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \ - PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE" \ + CHAIN_MEMPOOL_DELAY_INTERVAL=50 \ + PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE,CHAIN_MEMPOOL_DELAY_INTERVAL" \ ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs index 32a4463c425d..871258f43b85 100644 --- a/core/lib/multivm/src/versions/shadow.rs +++
b/core/lib/multivm/src/versions/shadow.rs @@ -77,7 +77,7 @@ where tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { let tx_hash = tx.hash(); let main_result = self.main.inspect_transaction_with_bytecode_compression( tracer, diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs index eb1ae45542db..8068e4847b83 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs @@ -83,7 +83,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode @@ -156,7 +156,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(compressed_bytecodes), result) + (Ok(compressed_bytecodes.into()), result) } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs index 22d7b2814cf6..241054ae0345 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 8e63afd8e1ca..2c1a4ba5e36b 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -105,7 +105,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -115,7 +115,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs index e692c8a2640d..c0d94bd685c4 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index e7a1f69fa424..71633dd3fca3 
100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -105,7 +105,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -115,7 +115,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs index 8a605978a1ed..830fe482320b 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 4b6b6931dd22..c7b4a5537acb 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -106,7 +106,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -116,7 +116,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs index ce37636d2cda..15b4daf02a77 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/state.rs @@ -189,11 +189,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 4bb570c0275a..d8816cfaf2a6 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -31,7 +31,7 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BytecodeCompressionError, CompressedBytecodeInfo, + 
storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, @@ -585,17 +585,17 @@ impl VmInterface for Vm { (): Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> ( - Result, BytecodeCompressionError>, - VmExecutionResultAndLogs, - ) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); let result = self.inspect((), VmExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) } else { - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()) + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()) }; (compression_result, result) } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs index f15199a74f84..4ba27b14bad6 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs @@ -191,11 +191,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index c0c13669c2ef..a445a1d51402 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -141,7 +141,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { @@ -151,7 +151,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 4282f3f0cf4a..df4baccaf156 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -93,14 +93,14 @@ impl VmInterface for Vm { _tracer: Self::TracerDispatcher, tx: Transaction, _with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), ); // Bytecode compression isn't supported - (Ok(vec![]), self.inspect((), VmExecutionMode::OneTx)) + (Ok(vec![].into()), self.inspect((), VmExecutionMode::OneTx)) } fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs 
b/core/lib/multivm/src/versions/vm_m6/vm.rs index 520abd930555..7e19076a5202 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -109,7 +109,7 @@ impl VmInterface for Vm { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { if let Some(storage_invocations) = tracer.storage_invocations { self.vm .execution_mode @@ -182,7 +182,7 @@ impl VmInterface for Vm { result, ) } else { - (Ok(compressed_bytecodes), result) + (Ok(compressed_bytecodes.into()), result) } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs index 12aab3c7364c..b428851c9383 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 2aa3ba05e662..119abf052b9f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -99,7 +99,7 @@ impl VmInterface for Vm { dispatcher: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect(dispatcher, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -109,7 +109,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs index 562d74513710..7e9af0ed6b82 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/state.rs @@ -167,11 +167,11 @@ impl BootloaderState { l2_block.first_tx_index + l2_block.txs.len() } - pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> Vec { + pub(crate) fn get_last_tx_compressed_bytecodes(&self) -> &[CompressedBytecodeInfo] { if let Some(tx) = self.last_l2_block().txs.last() { - tx.compressed_bytecodes.clone() + &tx.compressed_bytecodes } else { - vec![] + &[] } } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 6080df2bf2f1..0ecdd6797f4b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -99,7 +99,7 @@ impl VmInterface for Vm { tracer: TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, 
VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); let result = self.inspect_inner(tracer, VmExecutionMode::OneTx); if self.has_unpublished_bytecodes() { @@ -109,7 +109,10 @@ impl VmInterface for Vm { ) } else { ( - Ok(self.bootloader_state.get_last_tx_compressed_bytecodes()), + Ok(self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into()), result, ) } diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index 0fc626d9ac48..cedb4bc8276d 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -74,7 +74,7 @@ impl VmInterface for VmInstance { dispatcher: Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { dispatch_vm!(self.inspect_transaction_with_bytecode_compression( dispatcher.into(), tx, diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 17b125b0c41a..d02014584467 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -250,7 +250,7 @@ impl CommandReceiver { .unwrap_or_default(); return Ok(BatchTransactionExecutionResult { tx_result: Box::new(tx_result), - compressed_bytecodes, + compressed_bytecodes: compressed_bytecodes.into_owned(), call_traces, }); } @@ -269,8 +269,9 @@ impl CommandReceiver { let (compression_result, tx_result) = vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), false); - let compressed_bytecodes = - compression_result.context("compression failed when it wasn't applied")?; + let compressed_bytecodes = compression_result + .context("compression failed when it wasn't applied")? + .into_owned(); // TODO implement tracer manager which will be responsible // for collecting result from all tracers and save it to the database @@ -308,7 +309,7 @@ impl CommandReceiver { .unwrap_or_default(); Ok(BatchTransactionExecutionResult { tx_result: Box::new(tx_result), - compressed_bytecodes, + compressed_bytecodes: compressed_bytecodes.into_owned(), call_traces, }) } else { diff --git a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs index 1dd69dc7398d..c0c6e8737bbe 100644 --- a/core/lib/vm_interface/src/types/errors/bytecode_compression.rs +++ b/core/lib/vm_interface/src/types/errors/bytecode_compression.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + use crate::CompressedBytecodeInfo; /// Errors related to bytecode compression. @@ -9,4 +11,5 @@ pub enum BytecodeCompressionError { } /// Result of compressing bytecodes used by a transaction. -pub type BytecodeCompressionResult = Result, BytecodeCompressionError>; +pub type BytecodeCompressionResult<'a> = + Result, BytecodeCompressionError>; diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index b6be2c7581f7..f70be52bd86a 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -41,7 +41,7 @@ pub trait VmInterface { tracer: Self::TracerDispatcher, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs); + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); /// Record VM memory metrics. 
fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; @@ -63,7 +63,7 @@ pub trait VmInterfaceExt: VmInterface { &mut self, tx: Transaction, with_compression: bool, - ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.inspect_transaction_with_bytecode_compression( Self::TracerDispatcher::default(), tx, diff --git a/core/lib/web3_decl/src/client/mod.rs b/core/lib/web3_decl/src/client/mod.rs index a8246216eca3..7f0de4f3bca9 100644 --- a/core/lib/web3_decl/src/client/mod.rs +++ b/core/lib/web3_decl/src/client/mod.rs @@ -318,6 +318,7 @@ pub struct ClientBuilder { client: C, url: SensitiveUrl, rate_limit: (usize, Duration), + report_config: bool, network: Net, } @@ -328,6 +329,7 @@ impl fmt::Debug for ClientBuilder { .field("client", &any::type_name::()) .field("url", &self.url) .field("rate_limit", &self.rate_limit) + .field("report_config", &self.report_config) .field("network", &self.network) .finish_non_exhaustive() } @@ -340,6 +342,7 @@ impl ClientBuilder { client, url, rate_limit: (1, Duration::ZERO), + report_config: true, network: Net::default(), } } @@ -366,16 +369,25 @@ impl ClientBuilder { self } + /// Allows switching off config reporting for this client in logs and metrics. This is useful if a client is a short-living one + /// and is not injected as a dependency. + pub fn report_config(mut self, report: bool) -> Self { + self.report_config = report; + self + } + /// Builds the client. pub fn build(self) -> Client { - tracing::info!( - "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}", - self.network, - self.client, - self.rate_limit - ); let rate_limit = SharedRateLimit::new(self.rate_limit.0, self.rate_limit.1); - METRICS.observe_config(self.network.metric_label(), &rate_limit); + if self.report_config { + tracing::info!( + "Creating JSON-RPC client for network {:?} with inner client: {:?} and rate limit: {:?}", + self.network, + self.client, + self.rate_limit + ); + METRICS.observe_config(self.network.metric_label(), &rate_limit); + } Client { inner: self.client, diff --git a/core/tests/loadnext/src/account/mod.rs b/core/tests/loadnext/src/account/mod.rs index 5dcd5167165e..0f418bf12676 100644 --- a/core/tests/loadnext/src/account/mod.rs +++ b/core/tests/loadnext/src/account/mod.rs @@ -5,6 +5,7 @@ use std::{ }; use futures::{channel::mpsc, SinkExt}; +use rand::Rng; use tokio::sync::RwLock; use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_types::{api::TransactionReceipt, Address, Nonce, H256, U256, U64}; @@ -75,6 +76,8 @@ pub struct AccountLifespan { inflight_txs: VecDeque, /// Current account nonce, it is None at the beginning and will be set after the first transaction current_nonce: Option, + /// Randomly assigned polling interval. 
+ polling_interval: Duration, } impl AccountLifespan { @@ -82,11 +85,12 @@ impl AccountLifespan { config: &LoadtestConfig, contract_execution_params: LoadnextContractExecutionParams, addresses: AddressPool, - test_account: TestWallet, + mut test_account: TestWallet, report_sink: mpsc::Sender, main_l2_token: Address, paymaster_address: Address, ) -> Self { + let polling_interval = test_account.rng.gen_range(POLLING_INTERVAL); Self { wallet: test_account, config: config.clone(), @@ -99,6 +103,7 @@ impl AccountLifespan { report_sink, inflight_txs: Default::default(), current_nonce: None, + polling_interval, } } @@ -132,7 +137,7 @@ impl AccountLifespan { self.execute_command(deploy_command.clone()).await?; self.wait_for_all_inflight_tx().await?; - let mut timer = tokio::time::interval(POLLING_INTERVAL); + let mut timer = tokio::time::interval(self.polling_interval); let mut l1_tx_count = 0; loop { let command = self.generate_command(); @@ -157,7 +162,7 @@ impl AccountLifespan { } async fn wait_for_all_inflight_tx(&mut self) -> Result<(), Aborted> { - let mut timer = tokio::time::interval(POLLING_INTERVAL); + let mut timer = tokio::time::interval(self.polling_interval); while !self.inflight_txs.is_empty() { timer.tick().await; self.check_inflight_txs().await?; diff --git a/core/tests/loadnext/src/account_pool.rs b/core/tests/loadnext/src/account_pool.rs index 7b5e277e139b..3fa3141553cd 100644 --- a/core/tests/loadnext/src/account_pool.rs +++ b/core/tests/loadnext/src/account_pool.rs @@ -101,7 +101,9 @@ impl AccountPool { .context("invalid L2 RPC URL")?, )? .for_network(l2_chain_id.into()) + .report_config(false) .build(); + // Perform a health check: check whether ZKsync server is alive. let mut server_alive = false; for _ in 0usize..3 { diff --git a/core/tests/loadnext/src/constants.rs b/core/tests/loadnext/src/constants.rs index 7ac66ab7e1e7..6b989b16feb1 100644 --- a/core/tests/loadnext/src/constants.rs +++ b/core/tests/loadnext/src/constants.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{ops, time::Duration}; /// Normally, block is committed on Ethereum every 15 seconds; however there are no guarantees that our transaction /// will be included in the next block right after sending. @@ -14,7 +14,7 @@ pub const ETH_POLLING_INTERVAL: Duration = Duration::from_secs(10); pub const COMMIT_TIMEOUT: Duration = Duration::from_secs(600); /// We don't want to overload the server with too many requests; given the fact that blocks are expected to be created /// every couple of seconds, chosen value seems to be adequate to provide the result in one or two calls at average. -pub const POLLING_INTERVAL: Duration = Duration::from_secs(3); +pub const POLLING_INTERVAL: ops::Range = Duration::from_secs(2)..Duration::from_secs(3); pub const MAX_OUTSTANDING_NONCE: usize = 20; diff --git a/core/tests/loadnext/src/executor.rs b/core/tests/loadnext/src/executor.rs index a573583ed318..43a1be164b64 100644 --- a/core/tests/loadnext/src/executor.rs +++ b/core/tests/loadnext/src/executor.rs @@ -244,7 +244,7 @@ impl Executor { }); priority_op_handle - .polling_interval(POLLING_INTERVAL) + .polling_interval(POLLING_INTERVAL.end) .unwrap(); priority_op_handle .commit_timeout(COMMIT_TIMEOUT) @@ -313,7 +313,7 @@ impl Executor { }); priority_op_handle - .polling_interval(POLLING_INTERVAL) + .polling_interval(POLLING_INTERVAL.end) .unwrap(); priority_op_handle .commit_timeout(COMMIT_TIMEOUT) @@ -463,7 +463,7 @@ impl Executor { // Wait for transactions to be committed, if at least one of them fails, // return error. 
for mut handle in handles { - handle.polling_interval(POLLING_INTERVAL).unwrap(); + handle.polling_interval(POLLING_INTERVAL.end).unwrap(); let result = handle .commit_timeout(COMMIT_TIMEOUT) diff --git a/core/tests/loadnext/src/sdk/ethereum/mod.rs b/core/tests/loadnext/src/sdk/ethereum/mod.rs index 4b7bb00a3080..4557c2c43200 100644 --- a/core/tests/loadnext/src/sdk/ethereum/mod.rs +++ b/core/tests/loadnext/src/sdk/ethereum/mod.rs @@ -102,6 +102,7 @@ impl EthereumProvider { let query_client = Client::http(eth_web3_url) .map_err(|err| ClientError::NetworkError(err.to_string()))? .for_network(sl_chain_id.into()) + .report_config(false) .build(); let query_client: Box> = Box::new(query_client); let eth_client = SigningClient::new( diff --git a/core/tests/loadnext/src/sdk/wallet.rs b/core/tests/loadnext/src/sdk/wallet.rs index 9d3bd73a9bf2..551d0d8e385f 100644 --- a/core/tests/loadnext/src/sdk/wallet.rs +++ b/core/tests/loadnext/src/sdk/wallet.rs @@ -45,6 +45,7 @@ where let client = Client::http(rpc_address) .map_err(|err| ClientError::NetworkError(err.to_string()))? .for_network(signer.chain_id.into()) + .report_config(false) .build(); Ok(Wallet { From 9821a20018c367ce246dba656daab5c2e7757973 Mon Sep 17 00:00:00 2001 From: Alexander Melnikov Date: Wed, 4 Sep 2024 09:58:00 -0600 Subject: [PATCH 024/116] fix(zk_toolbox): fix port offset for new chains (#2803) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fixed port offset for newly created chains via `zk_inception`: - Use `chain.id` instead of `chain.chain_id` - Use `(chain.id - 1) * 100` as an offset to keep the port for the first chain as 3050 ## Why ❔ Using `chain.chain_id` was not intended as the resulting port number could potentially overflow. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
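For illustration, the offset arithmetic described above works out as follows (a minimal sketch; the standalone `port_offset` helper and the `base_rpc_port` constant are named for this example only, while the real change lives in `PortOffset::from_chain_id`, shown in the diff below):

```rust
// The offset is derived from the sequential ecosystem chain id (1, 2, 3, ...),
// not from the L2 `chain_id`, so it stays small for realistic chain counts.
fn port_offset(chain_id: u16) -> u16 {
    (chain_id - 1) * 100
}

fn main() {
    // 3050 is the default web3 JSON-RPC HTTP port of the first chain.
    let base_rpc_port: u16 = 3050;
    for id in 1..=3 {
        println!("chain {id}: RPC port {}", base_rpc_port + port_offset(id));
    }
    // chain 1: RPC port 3050
    // chain 2: RPC port 3150
    // chain 3: RPC port 3250
}
```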
---
 zk_toolbox/crates/config/src/general.rs                 | 2 +-
 .../crates/zk_inception/src/commands/chain/args/init.rs | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs
index 3426b21c6f6e..bcbe69e47196 100644
--- a/zk_toolbox/crates/config/src/general.rs
+++ b/zk_toolbox/crates/config/src/general.rs
@@ -127,7 +127,7 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> anyhow::Result<()> {
     let prometheus = config
         .prometheus_config
         .as_mut()
-        .context("Contract Verifier config is not presented")?;
+        .context("Prometheus config is not presented")?;
 
     api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port;
     update_port_in_url(
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs
index 2253eeb314ef..9dd6c490bd78 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs
@@ -22,7 +22,7 @@ pub struct PortOffset(u16);
 
 impl PortOffset {
     pub fn from_chain_id(chain_id: u16) -> Self {
-        Self(chain_id * 100)
+        Self((chain_id - 1) * 100)
     }
 }
@@ -88,7 +88,7 @@ impl InitArgs {
             l1_rpc_url,
             port_offset: self
                 .port_offset
-                .unwrap_or(PortOffset::from_chain_id(config.chain_id.as_u64() as u16))
+                .unwrap_or(PortOffset::from_chain_id(config.id as u16))
                 .into(),
         }
     }

From 1139e029e8bb2abf6011bffacc5e55ab896cc3a3 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Thu, 5 Sep 2024 14:13:02 +0400
Subject: [PATCH 025/116] refactor(prover_keystore): Remove cached commitments
 function (#2805)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Removes the `get_cached_commitments` function from the keystore, as it didn't in fact cache anything.
- Improves interfaces for commitment generation and for checking the scheduler vk hash.
- Generalizes the alignment check.
- Removes `zksync_types` from the `zksync_prover_keystore` deps.

## Why ❔

Readability.
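To make the reshaped interface concrete, here is a rough usage sketch of the two `Keystore` methods introduced below (error handling trimmed; the setup-data path is illustrative):

```rust
use zksync_basic_types::H256;
use zksync_prover_keystore::keystore::Keystore;

fn check_alignment(scheduler_vk_hash_from_db: H256) -> anyhow::Result<()> {
    // Commitments are now generated via the keystore itself rather than through
    // a free function backed by a global "cache" that never actually cached.
    let keystore = Keystore::new_with_setup_data_path("prover/data/keys".to_string());
    let commitments = keystore.generate_commitments()?;
    tracing::info!("Generated commitments: {:?}", commitments);

    // Recomputes the SNARK-wrapper VK hash locally and compares it against the
    // hash read from the database.
    keystore.verify_scheduler_vk_hash(scheduler_vk_hash_from_db)
}
```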
--- prover/Cargo.lock | 2 +- .../src/commitment_generator.rs | 6 +- .../src/main.rs | 4 +- .../crates/bin/witness_generator/src/main.rs | 74 +++++++---- prover/crates/lib/keystore/Cargo.toml | 2 +- .../lib/keystore/src/commitment_utils.rs | 125 ++++++++---------- prover/crates/lib/keystore/src/keystore.rs | 2 +- prover/crates/lib/keystore/src/utils.rs | 2 +- 8 files changed, 109 insertions(+), 108 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index f5c8ea5549a6..bc7d7e3693ad 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8235,10 +8235,10 @@ dependencies = [ "shivini", "tracing", "zkevm_test_harness", + "zksync_basic_types", "zksync_config", "zksync_env_config", "zksync_prover_fri_types", - "zksync_types", "zksync_utils", ] diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs index 8c2a17590099..ec4bbb77ba6e 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use zksync_prover_keystore::{commitment_utils::generate_commitments, keystore::Keystore}; +use zksync_prover_keystore::keystore::Keystore; use crate::vk_commitment_helper::{ get_toml_formatted_value, read_contract_toml, write_contract_toml, @@ -7,7 +7,9 @@ use crate::vk_commitment_helper::{ pub fn read_and_update_contract_toml(keystore: &Keystore, dryrun: bool) -> anyhow::Result<()> { let mut contract_doc = read_contract_toml().context("read_contract_toml()")?; - let vk_commitments = generate_commitments(keystore).context("generate_commitments()")?; + let vk_commitments = keystore + .generate_commitments() + .context("generate_commitments()")?; contract_doc["contracts"]["FRI_RECURSION_LEAF_LEVEL_VK_HASH"] = get_toml_formatted_value(vk_commitments.leaf); diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs index 313678bc5da8..aa359720ab44 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs @@ -24,7 +24,6 @@ use zksync_prover_fri_types::{ ProverServiceDataKey, }; use zksync_prover_keystore::{ - commitment_utils::generate_commitments, keystore::Keystore, setup_data_generator::{CPUSetupDataGenerator, GPUSetupDataGenerator, SetupDataGenerator}, }; @@ -98,7 +97,8 @@ fn generate_vks(keystore: &Keystore, jobs: usize, quiet: bool) -> anyhow::Result } // Let's also update the commitments file. - keystore.save_commitments(&generate_commitments(keystore)?) 
+ let commitments = keystore.generate_commitments()?; + keystore.save_commitments(&commitments) } #[derive(Debug, Parser)] diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 0e304b46cf74..2dca22c24579 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -14,9 +14,9 @@ use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; -use zksync_prover_keystore::commitment_utils::get_cached_commitments; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::AggregationRound; +use zksync_types::{basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion}; use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; use zksync_witness_generator::{ @@ -54,6 +54,43 @@ struct Opt { secrets_path: Option, } +/// Checks if the configuration locally matches the one in the database. +/// This function recalculates the commitment in order to check the exact code that +/// will run, instead of loading `commitments.json` (which also may correct misaligned +/// information). +async fn ensure_protocol_alignment( + prover_pool: &ConnectionPool, + protocol_version: ProtocolSemanticVersion, + setup_data_path: String, +) -> anyhow::Result<()> { + tracing::info!("Verifying protocol alignment for {:?}", protocol_version); + let vk_commitments_in_db = match prover_pool + .connection() + .await + .unwrap() + .fri_protocol_versions_dal() + .vk_commitments_for(protocol_version) + .await + { + Some(commitments) => commitments, + None => { + panic!( + "No vk commitments available in database for a protocol version {:?}.", + protocol_version + ); + } + }; + let keystore = Keystore::new_with_setup_data_path(setup_data_path); + // `recursion_scheduler_level_vk_hash` actually stores `scheduler_vk_hash` for historical reasons. 
+ let scheduler_vk_hash = vk_commitments_in_db.recursion_scheduler_level_vk_hash; + keystore + .verify_scheduler_vk_hash(scheduler_vk_hash) + .with_context(|| + format!("VK commitments didn't match commitments from DB for protocol version {protocol_version:?}") + )?; + Ok(()) +} + #[tokio::main] async fn main() -> anyhow::Result<()> { let opt = Opt::from_args(); @@ -103,22 +140,13 @@ async fn main() -> anyhow::Result<()> { let (stop_sender, stop_receiver) = watch::channel(false); let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; - let vk_commitments_in_db = match prover_connection_pool - .connection() - .await - .unwrap() - .fri_protocol_versions_dal() - .vk_commitments_for(protocol_version) - .await - { - Some(commitments) => commitments, - None => { - panic!( - "No vk commitments available in database for a protocol version {:?}.", - protocol_version - ); - } - }; + ensure_protocol_alignment( + &prover_connection_pool, + protocol_version, + prover_config.setup_data_path.clone(), + ) + .await + .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err)); let rounds = match (opt.round, opt.all_rounds) { (Some(round), false) => vec![round], @@ -171,16 +199,6 @@ async fn main() -> anyhow::Result<()> { let witness_generator_task = match round { AggregationRound::BasicCircuits => { - let start = Instant::now(); - let vk_commitments = get_cached_commitments(Some(setup_data_path.clone())); - let end = start.elapsed(); - tracing::info!("Calculating commitment took: {:?}", end); - assert_eq!( - vk_commitments, - vk_commitments_in_db, - "VK commitments didn't match commitments from DB for protocol version {protocol_version:?}. Cached commitments: {vk_commitments:?}, commitments in database: {vk_commitments_in_db:?}" - ); - let public_blob_store = match config.shall_save_to_public_bucket { false => None, true => Some( diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml index 41e9f0244f69..423df468d0b6 100644 --- a/prover/crates/lib/keystore/Cargo.toml +++ b/prover/crates/lib/keystore/Cargo.toml @@ -11,7 +11,7 @@ categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true zksync_utils.workspace = true zksync_prover_fri_types.workspace = true zkevm_test_harness.workspace = true diff --git a/prover/crates/lib/keystore/src/commitment_utils.rs b/prover/crates/lib/keystore/src/commitment_utils.rs index 792efba35adc..6973f86bf41e 100644 --- a/prover/crates/lib/keystore/src/commitment_utils.rs +++ b/prover/crates/lib/keystore/src/commitment_utils.rs @@ -1,16 +1,15 @@ -use std::{str::FromStr, sync::Mutex}; +use std::str::FromStr; use anyhow::Context as _; use hex::ToHex; -use once_cell::sync::Lazy; use zkevm_test_harness::witness::recursive_aggregation::{ compute_leaf_vks_and_params_commitment, compute_node_vk_commitment, }; +use zksync_basic_types::H256; use zksync_prover_fri_types::circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType, }; -use zksync_types::{protocol_version::L1VerifierConfig, H256}; use crate::{ keystore::Keystore, @@ -18,80 +17,62 @@ use crate::{ VkCommitments, }; -static KEYSTORE: Lazy>> = Lazy::new(|| Mutex::new(None)); +impl Keystore { + pub fn generate_commitments(&self) -> anyhow::Result { + let leaf_vk_params = get_leaf_vk_params(self).context("get_leaf_vk_params()")?; + let leaf_layer_params = leaf_vk_params + .iter() + .map(|el| el.1.clone()) + .collect::>() + .try_into() + .unwrap(); + 
let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params); -fn circuit_commitments(keystore: &Keystore) -> anyhow::Result { - let commitments = generate_commitments(keystore).context("generate_commitments()")?; - Ok(L1VerifierConfig { - // Instead of loading the FRI scheduler verification key here, - // we load the SNARK-wrapper verification key. - // This is due to the fact that these keys are used only for picking the - // prover jobs / witgen jobs from the DB. The keys are matched with the ones in - // `prover_fri_protocol_versions` table, which has the SNARK-wrapper verification key. - // This is OK because if the FRI VK changes, the SNARK-wrapper VK will change as well. - recursion_scheduler_level_vk_hash: H256::from_str(&commitments.snark_wrapper) - .context("invalid SNARK wrapper VK")?, - }) -} - -pub fn generate_commitments(keystore: &Keystore) -> anyhow::Result { - let leaf_vk_params = get_leaf_vk_params(keystore).context("get_leaf_vk_params()")?; - let leaf_layer_params = leaf_vk_params - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - let leaf_vk_commitment = compute_leaf_vks_and_params_commitment(leaf_layer_params); + let node_vk = self + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?; + let node_vk_commitment = compute_node_vk_commitment(node_vk.clone()); - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type(NodeLayerCircuit)")?; - let node_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + let scheduler_vk = self + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?; + let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone()); - let scheduler_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type(SchedulerCircuit)")?; - let scheduler_vk_commitment = compute_node_vk_commitment(scheduler_vk.clone()); + let hex_concatenator = |hex_array: [GoldilocksField; 4]| { + "0x".to_owned() + + &hex_array + .iter() + .map(|x| format!("{:016x}", x.0)) + .collect::>() + .join("") + }; - let hex_concatenator = |hex_array: [GoldilocksField; 4]| { - "0x".to_owned() - + &hex_array - .iter() - .map(|x| format!("{:016x}", x.0)) - .collect::>() - .join("") - }; + let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment); + let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment); + let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment); + let snark_vk_hash: String = calculate_snark_vk_hash(self)?.encode_hex(); - let leaf_aggregation_commitment_hex = hex_concatenator(leaf_vk_commitment); - let node_aggregation_commitment_hex = hex_concatenator(node_vk_commitment); - let scheduler_commitment_hex = hex_concatenator(scheduler_vk_commitment); - let snark_vk_hash: String = calculate_snark_vk_hash(keystore)?.encode_hex(); - - let result = VkCommitments { - leaf: leaf_aggregation_commitment_hex, - node: node_aggregation_commitment_hex, - scheduler: scheduler_commitment_hex, - snark_wrapper: format!("0x{}", snark_vk_hash), - }; - tracing::info!("Commitments: {:?}", result); - 
Ok(result) -} - -pub fn get_cached_commitments(setup_data_path: Option) -> L1VerifierConfig { - if let Some(setup_data_path) = setup_data_path { - let keystore = Keystore::new_with_setup_data_path(setup_data_path); - let mut keystore_lock = KEYSTORE.lock().unwrap(); - *keystore_lock = Some(keystore); + let result = VkCommitments { + leaf: leaf_aggregation_commitment_hex, + node: node_aggregation_commitment_hex, + scheduler: scheduler_commitment_hex, + snark_wrapper: format!("0x{}", snark_vk_hash), + }; + tracing::info!("Commitments: {:?}", result); + Ok(result) } - let keystore = KEYSTORE.lock().unwrap().clone().unwrap_or_default(); - let commitments = circuit_commitments(&keystore).unwrap(); - - tracing::info!("Using cached commitments {:?}", commitments); - commitments + pub fn verify_scheduler_vk_hash(&self, expected_hash: H256) -> anyhow::Result<()> { + let commitments = self + .generate_commitments() + .context("generate_commitments()")?; + let calculated_hash = + H256::from_str(&commitments.snark_wrapper).context("invalid SNARK wrapper VK")?; + anyhow::ensure!(expected_hash == calculated_hash, "Invalid SNARK wrapper VK hash. Calculated locally: {calculated_hash:?}, provided: {expected_hash:?}"); + Ok(()) + } } diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 7ba5a3aaa701..8fc2694608f9 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -16,10 +16,10 @@ use circuit_definitions::{ }; use serde::{Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; +use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_config::configs::FriProverConfig; use zksync_env_config::FromEnv; use zksync_prover_fri_types::ProverServiceDataKey; -use zksync_types::basic_fri_types::AggregationRound; #[cfg(feature = "gpu")] use crate::GoldilocksGpuProverSetupData; diff --git a/prover/crates/lib/keystore/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs index 5387b73e76cd..b74f716dac53 100644 --- a/prover/crates/lib/keystore/src/utils.rs +++ b/prover/crates/lib/keystore/src/utils.rs @@ -13,6 +13,7 @@ use zkevm_test_harness::{ franklin_crypto::bellman::{CurveAffine, PrimeField, PrimeFieldRepr}, witness::recursive_aggregation::compute_leaf_params, }; +use zksync_basic_types::H256; use zksync_prover_fri_types::circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type, @@ -21,7 +22,6 @@ use zksync_prover_fri_types::circuit_definitions::{ scheduler::aux::BaseLayerCircuitType, }, }; -use zksync_types::H256; use zksync_utils::locate_workspace; use crate::keystore::Keystore; From 6e057ebf277e0cbc7964079c01ef0348e006a53b Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 5 Sep 2024 13:23:13 +0300 Subject: [PATCH 026/116] feat(prover): Add error to panic message of prover (#2807) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ To improve debugging ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- .../crates/bin/prover_fri/src/gpu_prover_job_processor.rs | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 4b11353eac5c..0835c8ff4cbf 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -181,8 +181,11 @@ pub mod gpu_prover { (), &worker, ) - .unwrap_or_else(|_| { - panic!("failed generating GPU proof for id: {}", prover_job.job_id) + .unwrap_or_else(|err| { + panic!( + "failed generating GPU proof for id: {}, error: {:?}", + prover_job.job_id, err + ) }); tracing::info!( "Successfully generated gpu proof for job {} took: {:?}", From 0f2f9bd9ef4c2c7ba98a1fdbfca15d1de2b29997 Mon Sep 17 00:00:00 2001 From: Patrick Date: Thu, 5 Sep 2024 13:18:48 +0200 Subject: [PATCH 027/116] fix(tee): lowercase enum TEE types (#2798) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ We encountered an issue where the staging environment was unable to deserialize `sgx` to `TeeType::Sgx`. Relevant code: - https://github.com/matter-labs/zksync-era/blob/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488/core/lib/basic_types/src/tee_types.rs#L7 - https://github.com/matter-labs/teepot/blob/537521f0ee2bd704fb839fe336f43f8aab5887df/bin/tee-key-preexec/src/main.rs#L53 Relevant logs: - https://grafana.matterlabs.dev/goto/Q5ENugeSR?orgId=1 ## Why ❔ To fix a panic in the staging environment. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
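The root cause, reduced to a standalone sketch (assuming `serde`/`serde_json` as dependencies): the `strum` attribute `serialize = "sgx"` only affects the strum-derived `Display`/`FromStr` implementations, while the plain serde derive kept expecting the variant name `"Sgx"` on the wire. The `rename_all` attribute in the diff below aligns the serde representation:

```rust
use serde::Deserialize;

#[derive(Debug, PartialEq, Deserialize)]
#[serde(rename_all = "lowercase")] // without this, only "Sgx" would parse
enum TeeType {
    Sgx,
}

fn main() {
    // The lowercase wire format now deserializes:
    let tee: TeeType = serde_json::from_str("\"sgx\"").unwrap();
    assert_eq!(tee, TeeType::Sgx);
    // ...and the capitalized variant name is rejected, as the new unit tests assert:
    assert!(serde_json::from_str::<TeeType>("\"Sgx\"").is_err());
}
```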
--- core/lib/basic_types/src/tee_types.rs | 46 +++++++++++++++++-- .../tests/job_serialization.rs | 2 +- core/node/proof_data_handler/src/tests.rs | 4 +- 3 files changed, 46 insertions(+), 6 deletions(-) diff --git a/core/lib/basic_types/src/tee_types.rs b/core/lib/basic_types/src/tee_types.rs index c9be9b6e99d8..d49f2f183885 100644 --- a/core/lib/basic_types/src/tee_types.rs +++ b/core/lib/basic_types/src/tee_types.rs @@ -1,9 +1,49 @@ +use std::fmt; + use serde::{Deserialize, Serialize}; -use strum::{Display, EnumString}; -#[derive(Debug, Clone, Copy, PartialEq, EnumString, Display, Serialize, Deserialize)] +#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "lowercase")] #[non_exhaustive] pub enum TeeType { - #[strum(serialize = "sgx")] Sgx, } + +impl fmt::Display for TeeType { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + TeeType::Sgx => write!(f, "sgx"), + } + } +} + +#[cfg(test)] +mod tests { + use serde_json; + + use super::*; + + #[test] + fn test_serialize_teetype() { + let json_str = "\"sgx\""; + let tee_type: TeeType = serde_json::from_str(json_str).unwrap(); + assert_eq!(tee_type, TeeType::Sgx); + + for json_str in &["\"Sgx\"", "\"SGX\""] { + let result: Result = serde_json::from_str(json_str); + assert!(result.is_err()); + } + } + + #[test] + fn test_deserialize_teetype() { + let tee_type = TeeType::Sgx; + let json_str = serde_json::to_string(&tee_type).unwrap(); + assert_eq!(json_str, "\"sgx\""); + } + + #[test] + fn test_display_teetype() { + assert_eq!(TeeType::Sgx.to_string(), "sgx"); + } +} diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs index a2d55a140655..a2aee0c2733e 100644 --- a/core/lib/prover_interface/tests/job_serialization.rs +++ b/core/lib/prover_interface/tests/job_serialization.rs @@ -170,7 +170,7 @@ fn test_tee_proof_request_serialization() { "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], "proof": [ 10, 11, 12, 13, 14 ], - "tee_type": "Sgx" + "tee_type": "sgx" }"#; let tee_proof_result = serde_json::from_str::(tee_proof_str).unwrap(); let tee_proof_expected = SubmitTeeProofRequest(Box::new(L1BatchTeeProofForL1 { diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 5d7569d5720c..6ab7e4dec436 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -94,7 +94,7 @@ async fn request_tee_proof_inputs() { }, L1BatchCommitmentMode::Rollup, ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "Sgx" })).unwrap()); + let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); let response = app .oneshot( Request::builder() @@ -134,7 +134,7 @@ async fn submit_tee_proof() { "signature": [ 0, 1, 2, 3, 4 ], "pubkey": [ 5, 6, 7, 8, 9 ], "proof": [ 10, 11, 12, 13, 14 ], - "tee_type": "Sgx" + "tee_type": "sgx" }"#; let tee_proof_request = serde_json::from_str::(tee_proof_request_str).unwrap(); From 958dfdcac358897bfd4d2a2ddc1633a23dbfcdc9 Mon Sep 17 00:00:00 2001 From: Archethect Date: Thu, 5 Sep 2024 07:28:23 -0700 Subject: [PATCH 028/116] fix(core): Batched event processing support for Reth (#2623) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add batching for event processing for a Reth local L1 node. 
## Why ❔

Similar to Alchemy and Infura, a Reth local node also has a limit on the maximum number of results it can handle for event requests.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
---
 core/node/eth_watch/src/client.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs
index 8be556b42889..8d4651099940 100644
--- a/core/node/eth_watch/src/client.rs
+++ b/core/node/eth_watch/src/client.rs
@@ -40,6 +40,7 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync {
 pub const RETRY_LIMIT: usize = 5;
 const TOO_MANY_RESULTS_INFURA: &str = "query returned more than";
 const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded";
+const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range";
 
 /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]).
 #[derive(Debug)]
@@ -178,6 +179,7 @@ impl EthClient for EthHttpQueryClient {
         // check whether the error is related to having too many results
         if err_message.contains(TOO_MANY_RESULTS_INFURA)
             || err_message.contains(TOO_MANY_RESULTS_ALCHEMY)
+            || err_message.contains(TOO_MANY_RESULTS_RETH)
         {
             // get the numeric block ids
             let from_number = match from {

From 1da3f7ea1df94312e7c6818c17bf4109f888e547 Mon Sep 17 00:00:00 2001
From: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
Date: Thu, 5 Sep 2024 18:02:27 +0300
Subject: [PATCH 029/116] feat(eth-watch): do not query events from earliest
 block (#2810)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Removes querying from the earliest block in eth watch. Instead, it queries a constant block range and splits the queried range into parts if needed.

## Why ❔

Vanilla reth doesn't allow `eth_getLogs` requests where the block range is greater than 1_000_000. This change allows eth watch to work within this limitation.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
---
 core/node/eth_watch/src/client.rs | 139 +++++++++++++++++-------------
 1 file changed, 78 insertions(+), 61 deletions(-)

diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs
index 8d4651099940..67e603041e6c 100644
--- a/core/node/eth_watch/src/client.rs
+++ b/core/node/eth_watch/src/client.rs
@@ -88,75 +88,34 @@ impl EthHttpQueryClient {
         }
     }
 
-    async fn get_filter_logs(
+    fn get_default_address_list(&self) -> Vec<Address>
{ + [ + Some(self.diamond_proxy_addr), + Some(self.governance_address), + self.state_transition_manager_address, + self.chain_admin_address, + ] + .into_iter() + .flatten() + .collect() + } + + async fn get_events_inner( &self, from: BlockNumber, to: BlockNumber, - topics: Vec, + topics1: Vec, + topics2: Vec, + addresses: Vec
, + retries_left: usize, ) -> EnrichedClientResult> { let filter = FilterBuilder::default() - .address( - [ - Some(self.diamond_proxy_addr), - Some(self.governance_address), - self.state_transition_manager_address, - self.chain_admin_address, - ] - .into_iter() - .flatten() - .collect(), - ) .from_block(from) .to_block(to) - .topics(Some(topics), None, None, None) + .topics(Some(topics1), Some(topics2), None, None) + .address(addresses) .build(); - self.client.logs(&filter).await - } -} - -#[async_trait::async_trait] -impl EthClient for EthHttpQueryClient { - async fn scheduler_vk_hash( - &self, - verifier_address: Address, - ) -> Result { - // New verifier returns the hash of the verification key. - CallFunctionArgs::new("verificationKeyHash", ()) - .for_contract(verifier_address, &self.verifier_contract_abi) - .call(&self.client) - .await - } - - async fn diamond_cut_by_version( - &self, - packed_version: H256, - ) -> EnrichedClientResult>> { - let Some(state_transition_manager_address) = self.state_transition_manager_address else { - return Ok(None); - }; - - let filter = FilterBuilder::default() - .address(vec![state_transition_manager_address]) - .from_block(BlockNumber::Earliest) - .to_block(BlockNumber::Latest) - .topics( - Some(vec![self.new_upgrade_cut_data_signature]), - Some(vec![packed_version]), - None, - None, - ) - .build(); - let logs = self.client.logs(&filter).await?; - Ok(logs.into_iter().next().map(|log| log.data.0)) - } - - async fn get_events( - &self, - from: BlockNumber, - to: BlockNumber, - retries_left: usize, - ) -> EnrichedClientResult> { - let mut result = self.get_filter_logs(from, to, self.topics.clone()).await; + let mut result = self.client.logs(&filter).await; // This code is compatible with both Infura and Alchemy API providers. // Note: we don't handle rate-limits here - assumption is that we're never going to hit them. @@ -225,6 +184,64 @@ impl EthClient for EthHttpQueryClient { result } +} + +#[async_trait::async_trait] +impl EthClient for EthHttpQueryClient { + async fn scheduler_vk_hash( + &self, + verifier_address: Address, + ) -> Result { + // New verifier returns the hash of the verification key. 
+ CallFunctionArgs::new("verificationKeyHash", ()) + .for_contract(verifier_address, &self.verifier_contract_abi) + .call(&self.client) + .await + } + + async fn diamond_cut_by_version( + &self, + packed_version: H256, + ) -> EnrichedClientResult>> { + const LOOK_BACK_BLOCK_RANGE: u64 = 1_000_000; + + let Some(state_transition_manager_address) = self.state_transition_manager_address else { + return Ok(None); + }; + + let to_block = self.client.block_number().await?; + let from_block = to_block.saturating_sub((LOOK_BACK_BLOCK_RANGE - 1).into()); + + let logs = self + .get_events_inner( + from_block.into(), + to_block.into(), + vec![self.new_upgrade_cut_data_signature], + vec![packed_version], + vec![state_transition_manager_address], + RETRY_LIMIT, + ) + .await?; + + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + + async fn get_events( + &self, + from: BlockNumber, + to: BlockNumber, + retries_left: usize, + ) -> EnrichedClientResult> { + self.get_events_inner( + from, + to, + self.topics.clone(), + Vec::new(), + self.get_default_address_list(), + retries_left, + ) + .await + } async fn finalized_block_number(&self) -> EnrichedClientResult { if let Some(confirmations) = self.confirmations_for_eth_event { From 50da6c460196a7bc1f55c82844cf62ae2ceec0bb Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 5 Sep 2024 19:11:51 +0300 Subject: [PATCH 030/116] feat(prover): Make compressor build with 80 CUDA arch. (#2812) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add arch 80 to docker image of compressor ## Why ❔ To be able to run it on NVIDIA A100 ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
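Context for the Dockerfile change below: `CUDAARCHS` is the environment variable that CMake (3.20+) reads to initialize `CMAKE_CUDA_ARCHITECTURES`, and it accepts a semicolon-separated list. With `CUDAARCHS=89;80`, the CUDA kernels are compiled both for compute capability 8.9 (the image's existing default `CUDA_ARCH`, i.e. Ada-generation GPUs) and for 8.0, which is what the A100 reports.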
---
 docker/proof-fri-gpu-compressor/Dockerfile | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile
index 45f2ffa51b04..a3d92d113cde 100644
--- a/docker/proof-fri-gpu-compressor/Dockerfile
+++ b/docker/proof-fri-gpu-compressor/Dockerfile
@@ -4,7 +4,8 @@ FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder
 ARG DEBIAN_FRONTEND=noninteractive
 
 ARG CUDA_ARCH=89
-ENV CUDAARCHS=${CUDA_ARCH}
+ARG A100_CUDA_ARCH=80
+ENV CUDAARCHS=${CUDA_ARCH};${A100_CUDA_ARCH}
 
 RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \
     pkg-config build-essential libclang-dev && \

From fcffb0621122807e6499c1836a8b6bb95b1df1d7 Mon Sep 17 00:00:00 2001
From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com>
Date: Thu, 5 Sep 2024 23:45:23 +0400
Subject: [PATCH 031/116] chore(main): release core 24.24.0 (#2773)

:robot: I have created a release *beep* *boop*
---

## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05)

### Features

* conditional cbt l1 updates ([#2748](https://github.com/matter-labs/zksync-era/issues/2748)) ([6d18061](https://github.com/matter-labs/zksync-era/commit/6d18061df4a18803d3c6377305ef711ce60317e1))
* **eth-watch:** do not query events from earliest block ([#2810](https://github.com/matter-labs/zksync-era/issues/2810)) ([1da3f7e](https://github.com/matter-labs/zksync-era/commit/1da3f7ea1df94312e7c6818c17bf4109f888e547))
* **genesis:** Validate genesis config against L1 ([#2786](https://github.com/matter-labs/zksync-era/issues/2786)) ([b2dd9a5](https://github.com/matter-labs/zksync-era/commit/b2dd9a5c08fecf0a878632b33a32a78aac11c065))
* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488))
* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5))
* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d))
* **vm-runner:** Implement batch data prefetching ([#2724](https://github.com/matter-labs/zksync-era/issues/2724)) ([d01840d](https://github.com/matter-labs/zksync-era/commit/d01840d5de2cb0f4bead8f1c384b24ba713e6a66))
* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c))
* **vm:** Simplify VM interface ([#2760](https://github.com/matter-labs/zksync-era/issues/2760)) ([c3bde47](https://github.com/matter-labs/zksync-era/commit/c3bde47c1e7d16bc00f9b089516ed3691e4f3eb1))
* **zk_toolbox:** add multi-chain CI integration test ([#2594](https://github.com/matter-labs/zksync-era/issues/2594)) ([05c940e](https://github.com/matter-labs/zksync-era/commit/05c940efbd93023c315e5e13c98faee2153cc1cd))

### Bug Fixes

* **config:** Do not panic for observability config ([#2639](https://github.com/matter-labs/zksync-era/issues/2639)) ([1e768d4](https://github.com/matter-labs/zksync-era/commit/1e768d402012f6c7ce83fdd46c55f830ec31416a))
* **core:** Batched event processing support for Reth ([#2623](https://github.com/matter-labs/zksync-era/issues/2623)) ([958dfdc](https://github.com/matter-labs/zksync-era/commit/958dfdcac358897bfd4d2a2ddc1633a23dbfcdc9))
* return correct witness inputs ([#2770](https://github.com/matter-labs/zksync-era/issues/2770)) ([2516e2e](https://github.com/matter-labs/zksync-era/commit/2516e2e5c83673687d61d143daa70e98ccecce53))
* **tee-prover:** increase retries to reduce spurious alerts ([#2776](https://github.com/matter-labs/zksync-era/issues/2776)) ([4fdc806](https://github.com/matter-labs/zksync-era/commit/4fdc80636437090f6ebcfa4e2f1eb50edf53631a))
* **tee-prover:** mitigate panic on redeployments ([#2764](https://github.com/matter-labs/zksync-era/issues/2764)) ([178b386](https://github.com/matter-labs/zksync-era/commit/178b38644f507c5f6d12ba862d0c699e87985dd7))
* **tee:** lowercase enum TEE types ([#2798](https://github.com/matter-labs/zksync-era/issues/2798)) ([0f2f9bd](https://github.com/matter-labs/zksync-era/commit/0f2f9bd9ef4c2c7ba98a1fdbfca15d1de2b29997))
* **vm-runner:** Fix statement timeouts in VM playground ([#2772](https://github.com/matter-labs/zksync-era/issues/2772)) ([d3cd553](https://github.com/matter-labs/zksync-era/commit/d3cd553888a5c903c6eae13a88e92c11602e93de))

### Performance Improvements

* **vm:** Fix VM performance regression on CI loadtest ([#2782](https://github.com/matter-labs/zksync-era/issues/2782)) ([bc0d7d5](https://github.com/matter-labs/zksync-era/commit/bc0d7d5935c8f5409a8e53f1c04c5141409aef31))

---
This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please).

---------

Co-authored-by: zksync-era-bot
---
 .github/release-please/manifest.json |  2 +-
 Cargo.lock                           |  2 +-
 core/CHANGELOG.md                    | 32 ++++++++++++++++++++++++++++
 core/bin/external_node/Cargo.toml    |  2 +-
 4 files changed, 35 insertions(+), 3 deletions(-)

diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json
index 4c1d3095bc24..811c773b6f54 100644
--- a/.github/release-please/manifest.json
+++ b/.github/release-please/manifest.json
@@ -1,5 +1,5 @@
 {
-  "core": "24.23.0",
+  "core": "24.24.0",
   "prover": "16.5.0",
   "zk_toolbox": "0.1.2"
 }
diff --git a/Cargo.lock b/Cargo.lock
index 7c45ba3dad99..accd6b344486 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -8709,7 +8709,7 @@ dependencies = [
 
 [[package]]
 name = "zksync_external_node"
-version = "24.23.0"
+version = "24.24.0"
 dependencies = [
  "anyhow",
  "assert_matches",
diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md
index 4dea58651129..7d4381b09bef 100644
--- a/core/CHANGELOG.md
+++ b/core/CHANGELOG.md
@@ -1,5 +1,37 @@
 # Changelog
 
+## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05)
+
+
+### Features
+
+* conditional cbt l1 updates ([#2748](https://github.com/matter-labs/zksync-era/issues/2748)) ([6d18061](https://github.com/matter-labs/zksync-era/commit/6d18061df4a18803d3c6377305ef711ce60317e1))
+* **eth-watch:** do not query events from earliest block ([#2810](https://github.com/matter-labs/zksync-era/issues/2810)) ([1da3f7e](https://github.com/matter-labs/zksync-era/commit/1da3f7ea1df94312e7c6818c17bf4109f888e547))
+* **genesis:** Validate genesis config against L1 ([#2786](https://github.com/matter-labs/zksync-era/issues/2786)) ([b2dd9a5](https://github.com/matter-labs/zksync-era/commit/b2dd9a5c08fecf0a878632b33a32a78aac11c065))
+* Integrate tracers and implement circuits tracer in vm2 ([#2653](https://github.com/matter-labs/zksync-era/issues/2653)) ([87b02e3](https://github.com/matter-labs/zksync-era/commit/87b02e3ab5c1f61d59dd0f0eefa9ec33a7b55488))
+* Move prover data to /home/popzxc/workspace/current/zksync-era/prover/data ([#2778](https://github.com/matter-labs/zksync-era/issues/2778)) ([62e4d46](https://github.com/matter-labs/zksync-era/commit/62e4d4619dde9d6bd9102f1410eea75b0e2051c5))
+* Remove prover db from house keeper ([#2795](https://github.com/matter-labs/zksync-era/issues/2795)) ([85b7346](https://github.com/matter-labs/zksync-era/commit/85b734664b4306e988da07005860a7ea0fb7d22d))
+* **vm-runner:** Implement batch data prefetching ([#2724](https://github.com/matter-labs/zksync-era/issues/2724)) ([d01840d](https://github.com/matter-labs/zksync-era/commit/d01840d5de2cb0f4bead8f1c384b24ba713e6a66))
+* **vm:** Extract batch executor to separate crate ([#2702](https://github.com/matter-labs/zksync-era/issues/2702)) ([b82dfa4](https://github.com/matter-labs/zksync-era/commit/b82dfa4d29fce107223c3638fe490b5cb0f28d8c))
+* **vm:** Simplify VM interface ([#2760](https://github.com/matter-labs/zksync-era/issues/2760)) ([c3bde47](https://github.com/matter-labs/zksync-era/commit/c3bde47c1e7d16bc00f9b089516ed3691e4f3eb1))
+* **zk_toolbox:** add multi-chain CI integration test ([#2594](https://github.com/matter-labs/zksync-era/issues/2594)) ([05c940e](https://github.com/matter-labs/zksync-era/commit/05c940efbd93023c315e5e13c98faee2153cc1cd))
+
+
+### Bug Fixes
+
+* **config:** Do not panic for observability config ([#2639](https://github.com/matter-labs/zksync-era/issues/2639)) ([1e768d4](https://github.com/matter-labs/zksync-era/commit/1e768d402012f6c7ce83fdd46c55f830ec31416a))
+* **core:** Batched event processing support for Reth ([#2623](https://github.com/matter-labs/zksync-era/issues/2623)) ([958dfdc](https://github.com/matter-labs/zksync-era/commit/958dfdcac358897bfd4d2a2ddc1633a23dbfcdc9))
+* return correct witness inputs ([#2770](https://github.com/matter-labs/zksync-era/issues/2770)) ([2516e2e](https://github.com/matter-labs/zksync-era/commit/2516e2e5c83673687d61d143daa70e98ccecce53))
+* **tee-prover:** increase retries to reduce spurious alerts ([#2776](https://github.com/matter-labs/zksync-era/issues/2776)) ([4fdc806](https://github.com/matter-labs/zksync-era/commit/4fdc80636437090f6ebcfa4e2f1eb50edf53631a))
+* **tee-prover:** mitigate panic on redeployments ([#2764](https://github.com/matter-labs/zksync-era/issues/2764)) ([178b386](https://github.com/matter-labs/zksync-era/commit/178b38644f507c5f6d12ba862d0c699e87985dd7))
+* **tee:** lowercase enum TEE types ([#2798](https://github.com/matter-labs/zksync-era/issues/2798)) ([0f2f9bd](https://github.com/matter-labs/zksync-era/commit/0f2f9bd9ef4c2c7ba98a1fdbfca15d1de2b29997))
+* **vm-runner:** Fix statement timeouts in VM playground ([#2772](https://github.com/matter-labs/zksync-era/issues/2772)) ([d3cd553](https://github.com/matter-labs/zksync-era/commit/d3cd553888a5c903c6eae13a88e92c11602e93de))
+
+
+### Performance Improvements
+
+* **vm:** Fix VM performance regression on CI loadtest ([#2782](https://github.com/matter-labs/zksync-era/issues/2782)) ([bc0d7d5](https://github.com/matter-labs/zksync-era/commit/bc0d7d5935c8f5409a8e53f1c04c5141409aef31))
+
 ## [24.23.0](https://github.com/matter-labs/zksync-era/compare/core-v24.22.0...core-v24.23.0) (2024-08-28)
 
diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml
index ecfc60d7ec03..498b11b279b0 100644
--- a/core/bin/external_node/Cargo.toml
+++ b/core/bin/external_node/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 name = "zksync_external_node"
 description = "Non-validator ZKsync node"
-version = "24.23.0" # x-release-please-version
+version = "24.24.0" # x-release-please-version
 edition.workspace = true
 authors.workspace = true
 homepage.workspace = true

From 1559afbd14d5fe78c4ab2a5ef593403302e355f1 Mon Sep 17 00:00:00 2001
From: Alexander Melnikov
Date: Fri, 6 Sep 2024 02:01:58 -0600
Subject: [PATCH 032/116] feat(zk_toolbox): Add block explorer support to
 zk_toolbox (#2768)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

New `zk_inception explorer` command for easy block explorer setup.

### Usage:

`zk_inception explorer init` - initializes the explorer database and creates config files (executed for all chains, unless `--chain` is passed)

`zk_inception explorer backend` - runs the backend [services](https://github.com/matter-labs/block-explorer?tab=readme-ov-file#-architecture) (api, data_fetcher, worker) required for the block explorer app for a single chain (uses the default chain, unless `--chain` is passed)

`zk_inception explorer run` - runs the block-explorer-app (displays all chains, unless `--chain` is passed)

### Config structure:

* Ecosystem level apps configs:
  * `ecosystem/configs/apps.yaml` - ecosystem-level configuration for apps; edit it if you want to customize the ports for the portal and explorer apps.
  * `ecosystem/configs/apps/portal.config.json` - ecosystem-level configuration for the portal app; edit it if you want to customize display names, token lists, URLs, etc. for any chain in the portal. Refer to the [format](https://github.com/matter-labs/dapp-portal/blob/main/types/index.d.ts#L137-L149) and documentation from the [dapp-portal](https://github.com/matter-labs/dapp-portal) repository.
  * `ecosystem/configs/apps/explorer.config.json` - ecosystem-level configuration for the explorer app; edit it if you want to customize display names, URLs, etc. for any chain in the explorer. Refer to the [format](https://github.com/matter-labs/block-explorer/blob/main/packages/app/src/configs/index.ts#L23) from the [block-explorer](https://github.com/matter-labs/block-explorer) repository.
  * `ecosystem/configs/.generated/explorer.config.js` - this file is auto-generated on every `explorer run` and injected as a runtime config into the block-explorer-app docker image.
  * `ecosystem/configs/.generated/portal.config.js` - this file is auto-generated on every `portal` run and injected as a runtime config into the dapp-portal docker image.
* Chain level apps configs:
  * `chain/configs/explorer-docker-compose.yml` - configures the required explorer backend services as a docker compose file; edit it if you want to customize ports or parameters such as the batches polling interval. It is the user's responsibility to adjust the corresponding JSON app configs if ports are changed in this file.

## Why ❔

Currently, running the block-explorer requires users to manually pull the repository, install all dependencies, prepare the database, modify configurations, build the project, and then run it. This PR simplifies the process, allowing users to run the explorer effortlessly with a few commands.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
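For anyone trying this out locally, the end-to-end flow described above reduces to three commands. This is a minimal sketch assuming an already-initialized ecosystem; the chain name `era` is only an illustrative placeholder:

```bash
# One-time setup: create the explorer database and generate
# chain/configs/explorer-docker-compose.yml (runs for all chains by default).
zk_inception explorer init

# Start the backend services (api, data_fetcher, worker) for one chain.
zk_inception explorer backend --chain era

# Serve the dockerized explorer app, by default on http://localhost:3010.
zk_inception explorer run
```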
---------

Co-authored-by: Manuel Mauro
---
 zk_toolbox/README.md                          |  47 ++++
 zk_toolbox/crates/common/src/docker.rs        |  35 +--
 zk_toolbox/crates/config/src/apps.rs          |  59 +++++
 zk_toolbox/crates/config/src/consts.rs        |  35 ++-
 .../crates/config/src/docker_compose.rs       |  43 ++++
 zk_toolbox/crates/config/src/explorer.rs      | 147 ++++++++++++
 .../crates/config/src/explorer_compose.rs     | 214 ++++++++++++++++++
 zk_toolbox/crates/config/src/general.rs       |  13 +-
 zk_toolbox/crates/config/src/lib.rs           |   5 +
 zk_toolbox/crates/config/src/portal.rs        | 129 +++++++----
 zk_toolbox/crates/config/src/traits.rs        |   5 +
 .../zk_inception/src/commands/args/mod.rs     |   2 -
 .../zk_inception/src/commands/args/portal.rs  |  12 -
 .../zk_inception/src/commands/chain/init.rs   |   4 +-
 .../zk_inception/src/commands/containers.rs   |   2 +-
 .../src/commands/ecosystem/create.rs          |   6 +-
 .../src/commands/ecosystem/create_configs.rs  |  12 +-
 .../src/commands/explorer/backend.rs          |  39 ++++
 .../src/commands/explorer/init.rs             | 135 +++++++++++
 .../zk_inception/src/commands/explorer/mod.rs |  27 +++
 .../zk_inception/src/commands/explorer/run.rs |  98 ++++++++
 .../crates/zk_inception/src/commands/mod.rs   |   1 +
 .../zk_inception/src/commands/portal.rs       | 180 ++++++++-------
 zk_toolbox/crates/zk_inception/src/consts.rs  |   7 +-
 .../crates/zk_inception/src/defaults.rs       |  10 +
 zk_toolbox/crates/zk_inception/src/main.rs    |  15 +-
 .../crates/zk_inception/src/messages.rs       |  45 +++-
 27 files changed, 1166 insertions(+), 161 deletions(-)
 create mode 100644 zk_toolbox/crates/config/src/apps.rs
 create mode 100644 zk_toolbox/crates/config/src/docker_compose.rs
 create mode 100644 zk_toolbox/crates/config/src/explorer.rs
 create mode 100644 zk_toolbox/crates/config/src/explorer_compose.rs
 delete mode 100644 zk_toolbox/crates/zk_inception/src/commands/args/portal.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs

diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md
index 469e36a65f64..a3b44fa98b32 100644
--- a/zk_toolbox/README.md
+++ b/zk_toolbox/README.md
@@ -247,6 +247,53 @@ Run the external node:
 zk_inception en run
 ```
 
+### Portal
+
+Once you have at least one chain initialized, you can run the [portal](https://github.com/matter-labs/dapp-portal) - a
+web-app to bridge tokens between L1 and L2 and more:
+
+```bash
+zk_inception portal
+```
+
+This command will start the dockerized portal app using the configuration from the `apps/portal.config.json` file
+inside your ecosystem directory. You can edit this file to configure the portal app if needed. By default, the portal
+starts on `http://localhost:3030`; you can configure the port in the `apps.yaml` file.
+
+### Explorer
+
+For a better understanding of the blockchain data, you can use the
+[explorer](https://github.com/matter-labs/block-explorer) - a web-app to view and inspect transactions, blocks,
+contracts and more.
+
+First, each chain should be initialized:
+
+```bash
+zk_inception explorer init
+```
+
+This command creates a database to store explorer data and generates a docker compose file with explorer services
+(`explorer-docker-compose.yml`).
+
+Next, for each chain you want to have an explorer, you need to start its backend services:
+
+```bash
+zk_inception explorer backend --chain <chain_name>
+```
+
+This command uses the previously created docker compose file to start the services (api, data fetcher, worker)
+required for the explorer.
+
+Finally, you can run the explorer app:
+
+```bash
+zk_inception explorer run
+```
+
+This command will start the dockerized explorer app using the configuration from the `apps/explorer.config.json` file
+inside your ecosystem directory. You can edit this file to configure the app if needed. By default, the explorer starts
+on `http://localhost:3010`; you can configure the port in the `apps.yaml` file.
+
 ### Update
 
 To update your node:
diff --git a/zk_toolbox/crates/common/src/docker.rs b/zk_toolbox/crates/common/src/docker.rs
index 0ca31383f9cc..a5731808814f 100644
--- a/zk_toolbox/crates/common/src/docker.rs
+++ b/zk_toolbox/crates/common/src/docker.rs
@@ -1,26 +1,33 @@
-use std::collections::HashMap;
-
+use url::Url;
 use xshell::{cmd, Shell};
 
 use crate::cmd::Cmd;
 
-pub fn up(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> {
-    Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} up -d")).run()?)
+pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Result<()> {
+    let args = if detach { vec!["-d"] } else { vec![] };
+    let mut cmd = Cmd::new(cmd!(
+        shell,
+        "docker compose -f {docker_compose_file} up {args...}"
+    ));
+    cmd = if !detach { cmd.with_force_run() } else { cmd };
+    Ok(cmd.run()?)
 }
 
 pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> {
     Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?)
 }
 
-pub fn run(
-    shell: &Shell,
-    docker_image: &str,
-    docker_args: HashMap<String, String>,
-) -> anyhow::Result<()> {
-    let mut args = vec![];
-    for (key, value) in docker_args.iter() {
-        args.push(key);
-        args.push(value);
+pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec<String>) -> anyhow::Result<()> {
+    Ok(Cmd::new(cmd!(shell, "docker run {docker_args...} {docker_image}")).run()?)
+}
+
+pub fn adjust_localhost_for_docker(mut url: Url) -> anyhow::Result<Url> {
+    if let Some(host) = url.host_str() {
+        if host == "localhost" || host == "127.0.0.1" {
+            url.set_host(Some("host.docker.internal"))?;
+        }
+    } else {
+        anyhow::bail!("Failed to parse: no host");
     }
-    Ok(Cmd::new(cmd!(shell, "docker run {args...} {docker_image}")).run()?)
+    Ok(url)
 }
diff --git a/zk_toolbox/crates/config/src/apps.rs b/zk_toolbox/crates/config/src/apps.rs
new file mode 100644
index 000000000000..697b35b0851b
--- /dev/null
+++ b/zk_toolbox/crates/config/src/apps.rs
@@ -0,0 +1,59 @@
+use std::path::{Path, PathBuf};
+
+use serde::{Deserialize, Serialize};
+use xshell::Shell;
+
+use crate::{
+    consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH},
+    traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig},
+};
+
+/// Ecosystem level configuration for the apps (portal and explorer).
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct AppsEcosystemConfig {
+    pub portal: AppEcosystemConfig,
+    pub explorer: AppEcosystemConfig,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct AppEcosystemConfig {
+    pub http_port: u16,
+}
+
+impl ZkToolboxConfig for AppsEcosystemConfig {}
+impl FileConfigWithDefaultName for AppsEcosystemConfig {
+    const FILE_NAME: &'static str = APPS_CONFIG_FILE;
+}
+
+impl AppsEcosystemConfig {
+    pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf {
+        ecosystem_base_path
+            .join(LOCAL_CONFIGS_PATH)
+            .join(APPS_CONFIG_FILE)
+    }
+
+    pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> {
+        let config_path = Self::get_config_path(&shell.current_dir());
+        match Self::read(shell, &config_path) {
+            Ok(config) => Ok(config),
+            Err(_) => {
+                let config = Self::default();
+                config.save(shell, &config_path)?;
+                Ok(config)
+            }
+        }
+    }
+}
+
+impl Default for AppsEcosystemConfig {
+    fn default() -> Self {
+        AppsEcosystemConfig {
+            portal: AppEcosystemConfig {
+                http_port: DEFAULT_PORTAL_PORT,
+            },
+            explorer: AppEcosystemConfig {
+                http_port: DEFAULT_EXPLORER_PORT,
+            },
+        }
+    }
+}
diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs
index b4bbbdffbe24..1e1c0998f00e 100644
--- a/zk_toolbox/crates/config/src/consts.rs
+++ b/zk_toolbox/crates/config/src/consts.rs
@@ -30,12 +30,43 @@ pub const ERA_OBSERVABILITY_COMPOSE_FILE: &str = "era-observability/docker-compo
 pub const ERA_OBSERBAVILITY_DIR: &str = "era-observability";
 /// Era observability repo link
 pub const ERA_OBSERBAVILITY_GIT_REPO: &str = "https://github.com/matter-labs/era-observability";
+pub(crate) const LOCAL_APPS_PATH: &str = "apps/";
+pub(crate) const LOCAL_CHAINS_PATH: &str = "chains/";
 pub(crate) const LOCAL_CONFIGS_PATH: &str = "configs/";
+pub(crate) const LOCAL_GENERATED_PATH: &str = ".generated/";
 pub(crate) const LOCAL_DB_PATH: &str = "db/";
 pub(crate) const LOCAL_ARTIFACTS_PATH: &str = "artifacts/";
 
-/// Name of portal config file
-pub const PORTAL_CONFIG_FILE: &str = "portal.config.js";
+/// Name of apps config file
+pub const APPS_CONFIG_FILE: &str = "apps.yaml";
+/// Name of portal runtime config file (auto-generated)
+pub const PORTAL_JS_CONFIG_FILE: &str = "portal.config.js";
+/// Name of portal config JSON file
+pub const PORTAL_CONFIG_FILE: &str = "portal.config.json";
+/// Name of explorer runtime config file (auto-generated)
+pub const EXPLORER_JS_CONFIG_FILE: &str = "explorer.config.js";
+/// Name of explorer config JSON file
+pub const EXPLORER_CONFIG_FILE: &str = "explorer.config.json";
+/// Name of explorer docker compose file
+pub const EXPLORER_DOCKER_COMPOSE_FILE: &str = "explorer-docker-compose.yml";
+
+/// Default port for the explorer app
+pub const DEFAULT_EXPLORER_PORT: u16 = 3010;
+/// Default port for the portal app
+pub const DEFAULT_PORTAL_PORT: u16 = 3030;
+/// Default port for the explorer worker service
+pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001;
+/// Default port for the explorer API service
+pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002;
+/// Default port for the explorer data fetcher service
+pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040;
+
+pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api";
+pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher";
+pub const EXPLORER_WORKER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-worker";
+
+/// Interval (in milliseconds) for polling new batches to process in explorer app
+pub const EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL: u64 = 1000;
 
 /// Path to ecosystem contacts
 pub(crate) const ECOSYSTEM_PATH: &str = "etc/env/ecosystems";
diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zk_toolbox/crates/config/src/docker_compose.rs
new file mode 100644
index 000000000000..05c6e73eaea5
--- /dev/null
+++ b/zk_toolbox/crates/config/src/docker_compose.rs
@@ -0,0 +1,43 @@
+use std::collections::HashMap;
+
+use serde::{Deserialize, Serialize};
+
+use crate::traits::ZkToolboxConfig;
+
+#[derive(Debug, Default, Serialize, Deserialize, Clone)]
+pub struct DockerComposeConfig {
+    pub services: HashMap<String, DockerComposeService>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub name: Option<String>,
+    #[serde(flatten)]
+    pub other: serde_json::Value,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct DockerComposeService {
+    pub image: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub platform: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub ports: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub environment: Option<HashMap<String, String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub volumes: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub depends_on: Option<Vec<String>>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub restart: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub extra_hosts: Option<Vec<String>>,
+    #[serde(flatten)]
+    pub other: serde_json::Value,
+}
+
+impl ZkToolboxConfig for DockerComposeConfig {}
+
+impl DockerComposeConfig {
+    pub fn add_service(&mut self, name: &str, service: DockerComposeService) {
+        self.services.insert(name.to_string(), service);
+    }
+}
diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zk_toolbox/crates/config/src/explorer.rs
new file mode 100644
index 000000000000..ee7a59e5105c
--- /dev/null
+++ b/zk_toolbox/crates/config/src/explorer.rs
@@ -0,0 +1,147 @@
+use std::path::{Path, PathBuf};
+
+use serde::{Deserialize, Serialize};
+use xshell::Shell;
+
+use crate::{
+    consts::{
+        EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH,
+        LOCAL_GENERATED_PATH,
+    },
+    traits::{ReadConfig, SaveConfig, ZkToolboxConfig},
+};
+
+/// Explorer JSON configuration file. This file contains configuration for the explorer app.
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct ExplorerConfig {
+    pub app_environment: String,
+    pub environment_config: EnvironmentConfig,
+    #[serde(flatten)]
+    pub other: serde_json::Value,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+pub struct EnvironmentConfig {
+    pub networks: Vec<ExplorerChainConfig>,
+}
+
+#[derive(Serialize, Deserialize, Debug, Clone)]
+#[serde(rename_all = "camelCase")]
+pub struct ExplorerChainConfig {
+    pub name: String, // L2 network chain name (the one used during the chain initialization)
+    pub l2_network_name: String, // How the network is displayed in the app dropdown
+    pub l2_chain_id: u64,
+    pub rpc_url: String, // L2 RPC URL
+    pub api_url: String, // L2 API URL
+    pub base_token_address: String, // L2 base token address (currently always 0x800A)
+    pub hostnames: Vec<String>, // Custom domain to use when switched to this chain in the app
+    pub icon: String, // Icon to show in the explorer dropdown
+    pub maintenance: bool, // Maintenance warning
+    pub published: bool, // If false, the chain will not be shown in the explorer dropdown
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub bridge_url: Option<String>, // Link to the portal bridge
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub l1_explorer_url: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub verification_api_url: Option<String>, // L2 verification API URL
+    #[serde(flatten)]
+    pub other: serde_json::Value,
+}
+
+impl ExplorerConfig {
+    /// Returns the path to the explorer configuration file.
+    pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf {
+        ecosystem_base_path
+            .join(LOCAL_CONFIGS_PATH)
+            .join(LOCAL_APPS_PATH)
+            .join(EXPLORER_CONFIG_FILE)
+    }
+
+    /// Reads the existing config or creates a default one if it doesn't exist.
+    pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> {
+        let config_path = Self::get_config_path(&shell.current_dir());
+        match Self::read(shell, &config_path) {
+            Ok(config) => Ok(config),
+            Err(_) => {
+                let config = Self::default();
+                config.save(shell, &config_path)?;
+                Ok(config)
+            }
+        }
+    }
+
+    /// Adds or updates a given chain configuration.
+    pub fn add_chain_config(&mut self, config: &ExplorerChainConfig) {
+        // Replace if config with the same network name already exists
+        if let Some(index) = self
+            .environment_config
+            .networks
+            .iter()
+            .position(|c| c.name == config.name)
+        {
+            self.environment_config.networks[index] = config.clone();
+            return;
+        }
+        self.environment_config.networks.push(config.clone());
+    }
+
+    /// Retains only the chains whose names are present in the given vector.
+    pub fn filter(&mut self, chain_names: &[String]) {
+        self.environment_config
+            .networks
+            .retain(|config| chain_names.contains(&config.name));
+    }
+
+    /// Hides all chains except those specified in the given vector.
+    pub fn hide_except(&mut self, chain_names: &[String]) {
+        for network in &mut self.environment_config.networks {
+            network.published = chain_names.contains(&network.name);
+        }
+    }
+
+    /// Checks if a chain with the given name exists in the configuration.
+    pub fn contains(&self, chain_name: &String) -> bool {
+        self.environment_config
+            .networks
+            .iter()
+            .any(|config| &config.name == chain_name)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.environment_config.networks.is_empty()
+    }
+
+    pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result<PathBuf> {
+        // The block-explorer-app is served as a pre-built static app in a Docker image.
+        // It uses a JavaScript file (config.js) that injects the configuration at runtime
+        // by overwriting the '##runtimeConfig' property of the window object.
+        // This file will be mounted to the Docker image when it runs.
+        let path = Self::get_generated_js_config_path(&shell.current_dir());
+        let json = serde_json::to_string_pretty(&self)?;
+        let config_js_content = format!("window['##runtimeConfig'] = {};", json);
+        shell.write_file(path.clone(), config_js_content.as_bytes())?;
+        Ok(path)
+    }
+
+    fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf {
+        ecosystem_base_path
+            .join(LOCAL_CONFIGS_PATH)
+            .join(LOCAL_GENERATED_PATH)
+            .join(EXPLORER_JS_CONFIG_FILE)
+    }
+}
+
+impl Default for ExplorerConfig {
+    fn default() -> Self {
+        ExplorerConfig {
+            app_environment: "default".to_string(),
+            environment_config: EnvironmentConfig {
+                networks: Vec::new(),
+            },
+            other: serde_json::Value::Null,
+        }
+    }
+}
+
+impl ZkToolboxConfig for ExplorerConfig {}
diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zk_toolbox/crates/config/src/explorer_compose.rs
new file mode 100644
index 000000000000..ca9abc1e3e23
--- /dev/null
+++ b/zk_toolbox/crates/config/src/explorer_compose.rs
@@ -0,0 +1,214 @@
+use std::{
+    collections::HashMap,
+    path::{Path, PathBuf},
+};
+
+use anyhow::Context;
+use common::{db, docker::adjust_localhost_for_docker};
+use serde::{Deserialize, Serialize};
+use url::Url;
+
+use crate::{
+    consts::{
+        DEFAULT_EXPLORER_API_PORT, DEFAULT_EXPLORER_DATA_FETCHER_PORT,
+        DEFAULT_EXPLORER_WORKER_PORT, EXPLORER_API_DOCKER_IMAGE,
+        EXPLORER_DATA_FETCHER_DOCKER_IMAGE, EXPLORER_DOCKER_COMPOSE_FILE,
+        EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH,
+    },
+    docker_compose::{DockerComposeConfig, DockerComposeService},
+    traits::ZkToolboxConfig,
+    EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL,
+};
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct ExplorerBackendPorts {
+    pub api_http_port: u16,
+    pub data_fetcher_http_port: u16,
+    pub worker_http_port: u16,
+}
+
+impl ExplorerBackendPorts {
+    pub fn with_offset(&self, offset: u16) -> Self {
+        ExplorerBackendPorts {
+            api_http_port: self.api_http_port + offset,
+            data_fetcher_http_port: self.data_fetcher_http_port + offset,
+            worker_http_port: self.worker_http_port + offset,
+        }
+    }
+}
+
+impl Default for ExplorerBackendPorts {
+    fn default() -> Self {
+        ExplorerBackendPorts {
+            api_http_port: DEFAULT_EXPLORER_API_PORT,
+            data_fetcher_http_port: DEFAULT_EXPLORER_DATA_FETCHER_PORT,
+            worker_http_port: DEFAULT_EXPLORER_WORKER_PORT,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct ExplorerBackendConfig {
+    pub database_url: Url,
+    pub ports: ExplorerBackendPorts,
+    pub batches_processing_polling_interval: u64,
+}
+
+impl ExplorerBackendConfig {
+    pub fn new(database_url: Url, ports: &ExplorerBackendPorts) -> Self {
+        ExplorerBackendConfig {
+            database_url,
+            ports: ports.clone(),
+            batches_processing_polling_interval: EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL,
+        }
+    }
+}
+
+/// Chain-level explorer backend docker compose file.
+/// It contains configuration for api, data fetcher, and worker services.
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct ExplorerBackendComposeConfig {
+    #[serde(flatten)]
+    pub docker_compose: DockerComposeConfig,
+}
+
+impl ZkToolboxConfig for ExplorerBackendComposeConfig {}
+
+impl ExplorerBackendComposeConfig {
+    const API_NAME: &'static str = "api";
+    const DATA_FETCHER_NAME: &'static str = "data-fetcher";
+    const WORKER_NAME: &'static str = "worker";
+
+    pub fn new(
+        chain_name: &str,
+        l2_rpc_url: Url,
+        config: &ExplorerBackendConfig,
+    ) -> anyhow::Result<Self> {
+        let db_url = adjust_localhost_for_docker(config.database_url.clone())?;
+        let l2_rpc_url = adjust_localhost_for_docker(l2_rpc_url)?;
+
+        let mut services: HashMap<String, DockerComposeService> = HashMap::new();
+        services.insert(
+            Self::API_NAME.to_string(),
+            Self::create_api_service(config.ports.api_http_port, db_url.as_ref()),
+        );
+        services.insert(
+            Self::DATA_FETCHER_NAME.to_string(),
+            Self::create_data_fetcher_service(
+                config.ports.data_fetcher_http_port,
+                l2_rpc_url.as_ref(),
+            ),
+        );
+
+        let worker = Self::create_worker_service(
+            config.ports.worker_http_port,
+            config.ports.data_fetcher_http_port,
+            l2_rpc_url.as_ref(),
+            &db_url,
+            config.batches_processing_polling_interval,
+        )
+        .context("Failed to create worker service")?;
+        services.insert(Self::WORKER_NAME.to_string(), worker);
+
+        Ok(Self {
+            docker_compose: DockerComposeConfig {
+                name: Some(format!("{chain_name}-explorer")),
+                services,
+                other: serde_json::Value::Null,
+            },
+        })
+    }
+
+    fn create_api_service(port: u16, db_url: &str) -> DockerComposeService {
+        DockerComposeService {
+            image: EXPLORER_API_DOCKER_IMAGE.to_string(),
+            platform: Some("linux/amd64".to_string()),
+            ports: Some(vec![format!("{}:{}", port, port)]),
+            volumes: None,
+            depends_on: Some(vec![Self::WORKER_NAME.to_string()]),
+            restart: None,
+            environment: Some(HashMap::from([
+                ("PORT".to_string(), port.to_string()),
+                ("LOG_LEVEL".to_string(), "verbose".to_string()),
+                ("NODE_ENV".to_string(), "development".to_string()),
+                ("DATABASE_URL".to_string(), db_url.to_string()),
+            ])),
+            extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]),
+            other: serde_json::Value::Null,
+        }
+    }
+
+    fn create_data_fetcher_service(port: u16, l2_rpc_url: &str) -> DockerComposeService {
+        DockerComposeService {
+            image: EXPLORER_DATA_FETCHER_DOCKER_IMAGE.to_string(),
+            platform: Some("linux/amd64".to_string()),
+            ports: Some(vec![format!("{}:{}", port, port)]),
+            volumes: None,
+            depends_on: None,
+            restart: None,
+            environment: Some(HashMap::from([
+                ("PORT".to_string(), port.to_string()),
+                ("LOG_LEVEL".to_string(), "verbose".to_string()),
+                ("NODE_ENV".to_string(), "development".to_string()),
+                ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()),
+            ])),
+            extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]),
+            other: serde_json::Value::Null,
+        }
+    }
+
+    fn create_worker_service(
+        port: u16,
+        data_fetcher_port: u16,
+        l2_rpc_url: &str,
+        db_url: &Url,
+        batches_processing_polling_interval: u64,
+    ) -> anyhow::Result<DockerComposeService> {
+        let data_fetcher_url = format!("http://{}:{}", Self::DATA_FETCHER_NAME, data_fetcher_port);
+
+        // Parse database URL
+        let db_config = db::DatabaseConfig::from_url(db_url)?;
+        let db_user = db_url.username().to_string();
+        let db_password = db_url.password().unwrap_or("");
+        let db_port = db_url.port().unwrap_or(5432);
+        let db_host = db_url
+            .host_str()
+            .context("Failed to parse database host")?
+            .to_string();
+
+        Ok(DockerComposeService {
+            image: EXPLORER_WORKER_DOCKER_IMAGE.to_string(),
+            platform: Some("linux/amd64".to_string()),
+            ports: None,
+            volumes: None,
+            depends_on: None,
+            restart: None,
+            environment: Some(HashMap::from([
+                ("PORT".to_string(), port.to_string()),
+                ("LOG_LEVEL".to_string(), "verbose".to_string()),
+                ("NODE_ENV".to_string(), "development".to_string()),
+                ("DATABASE_HOST".to_string(), db_host.to_string()),
+                ("DATABASE_PORT".to_string(), db_port.to_string()),
+                ("DATABASE_USER".to_string(), db_user.to_string()),
+                ("DATABASE_PASSWORD".to_string(), db_password.to_string()),
+                ("DATABASE_NAME".to_string(), db_config.name.to_string()),
+                ("BLOCKCHAIN_RPC_URL".to_string(), l2_rpc_url.to_string()),
+                ("DATA_FETCHER_URL".to_string(), data_fetcher_url),
+                (
+                    "BATCHES_PROCESSING_POLLING_INTERVAL".to_string(),
+                    batches_processing_polling_interval.to_string(),
+                ),
+            ])),
+            extra_hosts: Some(vec!["host.docker.internal:host-gateway".to_string()]),
+            other: serde_json::Value::Null,
+        })
+    }
+
+    pub fn get_config_path(ecosystem_base_path: &Path, chain_name: &str) -> PathBuf {
+        ecosystem_base_path
+            .join(LOCAL_CHAINS_PATH)
+            .join(chain_name)
+            .join(LOCAL_CONFIGS_PATH)
+            .join(EXPLORER_DOCKER_COMPOSE_FILE)
+    }
+}
diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs
index bcbe69e47196..41c2e4c33cfd 100644
--- a/zk_toolbox/crates/config/src/general.rs
+++ b/zk_toolbox/crates/config/src/general.rs
@@ -9,7 +9,7 @@ use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr};
 
 use crate::{
     consts::GENERAL_FILE,
-    traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig},
+    traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig},
 };
 
 pub struct RocksDbs {
@@ -211,3 +211,14 @@ impl ReadConfig for GeneralConfig {
         decode_yaml_repr::<proto::general::GeneralConfig>(&path, false)
     }
 }
+
+impl ConfigWithL2RpcUrl for GeneralConfig {
+    fn get_l2_rpc_url(&self) -> anyhow::Result<Url> {
+        self.api_config
+            .as_ref()
+            .map(|api_config| &api_config.web3_json_rpc.http_url)
+            .context("API config is missing")?
+            .parse()
+            .context("Failed to parse L2 RPC URL")
+    }
+}
diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs
index 4e00962229bc..3c7443f24490 100644
--- a/zk_toolbox/crates/config/src/lib.rs
+++ b/zk_toolbox/crates/config/src/lib.rs
@@ -1,3 +1,4 @@
+pub use apps::*;
 pub use chain::*;
 pub use consts::*;
 pub use contracts::*;
@@ -11,6 +12,7 @@ pub use wallet_creation::*;
 pub use wallets::*;
 pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr};
 
+mod apps;
 mod chain;
 mod consts;
 mod contracts;
@@ -23,6 +25,9 @@ mod secrets;
 mod wallet_creation;
 mod wallets;
 
+pub mod docker_compose;
+pub mod explorer;
+pub mod explorer_compose;
 pub mod external_node;
 pub mod forge_interface;
 pub mod portal;
diff --git a/zk_toolbox/crates/config/src/portal.rs b/zk_toolbox/crates/config/src/portal.rs
index 4b68d5744cd9..c787c6cc7026 100644
--- a/zk_toolbox/crates/config/src/portal.rs
+++ b/zk_toolbox/crates/config/src/portal.rs
@@ -5,28 +5,25 @@ use types::TokenInfo;
 use xshell::Shell;
 
 use crate::{
-    consts::{LOCAL_CONFIGS_PATH, PORTAL_CONFIG_FILE},
-    traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig},
+    consts::{
+        LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE,
+        PORTAL_JS_CONFIG_FILE,
+    },
+    traits::{ReadConfig, SaveConfig, ZkToolboxConfig},
 };
 
+/// Portal JSON configuration file. This file contains configuration for the portal app.
 #[derive(Serialize, Deserialize, Debug, Clone)]
 #[serde(rename_all = "camelCase")]
-pub struct PortalRuntimeConfig {
+pub struct PortalConfig {
     pub node_type: String,
-    pub hyperchains_config: HyperchainsConfig,
+    pub hyperchains_config: Vec<PortalChainConfig>,
+    #[serde(flatten)]
+    pub other: serde_json::Value,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct HyperchainsConfig(pub Vec<HyperchainConfig>);
-
-impl HyperchainsConfig {
-    pub fn is_empty(&self) -> bool {
-        self.0.is_empty()
-    }
-}
-
-#[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct HyperchainConfig {
+pub struct PortalChainConfig {
     pub network: NetworkConfig,
     pub tokens: Vec<TokenConfig>,
 }
@@ -35,10 +32,12 @@ pub struct HyperchainConfig {
 #[serde(rename_all = "camelCase")]
 pub struct NetworkConfig {
     pub id: u64, // L2 Network ID
-    pub key: String, // L2 Network key
-    pub name: String, // L2 Network name
+    pub key: String, // L2 Network key (chain name used during the initialization)
+    pub name: String, // L2 Network name (displayed in the app dropdown)
     pub rpc_url: String, // L2 RPC URL
     #[serde(skip_serializing_if = "Option::is_none")]
+    pub hidden: Option<bool>, // If true, the chain will not be shown in the app dropdown
+    #[serde(skip_serializing_if = "Option::is_none")]
     pub block_explorer_url: Option<String>, // L2 Block Explorer URL
     #[serde(skip_serializing_if = "Option::is_none")]
     pub block_explorer_api: Option<String>, // L2 Block Explorer API
@@ -46,6 +45,8 @@ pub struct NetworkConfig {
     pub public_l1_network_id: Option<u64>, // Ethereum Mainnet or Ethereum Sepolia Testnet ID
     #[serde(skip_serializing_if = "Option::is_none")]
     pub l1_network: Option<L1NetworkConfig>,
+    #[serde(flatten)]
+    pub other: serde_json::Value,
 }
 
 #[derive(Serialize, Deserialize, Debug, Clone)]
@@ -81,44 +82,94 @@ pub struct TokenConfig {
     pub name: Option<String>,
 }
 
-impl PortalRuntimeConfig {
+impl PortalConfig {
+    /// Returns the path to the portal configuration file.
     pub fn get_config_path(ecosystem_base_path: &Path) -> PathBuf {
         ecosystem_base_path
             .join(LOCAL_CONFIGS_PATH)
+            .join(LOCAL_APPS_PATH)
             .join(PORTAL_CONFIG_FILE)
     }
-}
 
-impl FileConfigWithDefaultName for PortalRuntimeConfig {
-    const FILE_NAME: &'static str = PORTAL_CONFIG_FILE;
-}
+    /// Reads the existing config or creates a default one if it doesn't exist.
+    pub fn read_or_create_default(shell: &Shell) -> anyhow::Result<Self> {
+        let config_path = Self::get_config_path(&shell.current_dir());
+        match Self::read(shell, &config_path) {
+            Ok(config) => Ok(config),
+            Err(_) => {
+                let config = Self::default();
+                config.save(shell, &config_path)?;
+                Ok(config)
+            }
+        }
+    }
+
+    /// Adds or updates a given chain configuration.
+    pub fn add_chain_config(&mut self, config: &PortalChainConfig) {
+        // Replace if config with the same network key already exists
+        if let Some(index) = self
+            .hyperchains_config
+            .iter()
+            .position(|c| c.network.key == config.network.key)
+        {
+            self.hyperchains_config[index] = config.clone();
+            return;
+        }
+        self.hyperchains_config.push(config.clone());
+    }
+
+    /// Retains only the chains whose names are present in the given vector.
+    pub fn filter(&mut self, chain_names: &[String]) {
+        self.hyperchains_config
+            .retain(|config| chain_names.contains(&config.network.key));
+    }
+
+    /// Hides all chains except those specified in the given vector.
+    pub fn hide_except(&mut self, chain_names: &[String]) {
+        for config in &mut self.hyperchains_config {
+            config.network.hidden = Some(!chain_names.contains(&config.network.key));
+        }
+    }
 
-impl SaveConfig for PortalRuntimeConfig {
-    fn save(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> {
+    /// Checks if a chain with the given name exists in the configuration.
+    pub fn contains(&self, chain_name: &String) -> bool {
+        self.hyperchains_config
+            .iter()
+            .any(|config| &config.network.key == chain_name)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.hyperchains_config.is_empty()
+    }
+
+    pub fn save_as_js(&self, shell: &Shell) -> anyhow::Result<PathBuf> {
         // The dapp-portal is served as a pre-built static app in a Docker image.
         // It uses a JavaScript file (config.js) that injects the configuration at runtime
         // by overwriting the '##runtimeConfig' property of the window object.
-        // Therefore, we generate a JavaScript file instead of a JSON file.
         // This file will be mounted to the Docker image when it runs.
+        let path = Self::get_generated_js_config_path(&shell.current_dir());
         let json = serde_json::to_string_pretty(&self)?;
         let config_js_content = format!("window['##runtimeConfig'] = {};", json);
-        Ok(shell.write_file(path, config_js_content.as_bytes())?)
+        shell.write_file(path.clone(), config_js_content.as_bytes())?;
+        Ok(path)
+    }
+
+    fn get_generated_js_config_path(ecosystem_base_path: &Path) -> PathBuf {
+        ecosystem_base_path
+            .join(LOCAL_CONFIGS_PATH)
+            .join(LOCAL_GENERATED_PATH)
+            .join(PORTAL_JS_CONFIG_FILE)
     }
 }
 
-impl ReadConfig for PortalRuntimeConfig {
-    fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> {
-        let config_js_content = shell.read_file(path)?;
-        // Extract the JSON part from the JavaScript file
-        let json_start = config_js_content
-            .find('{')
-            .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?;
-        let json_end = config_js_content
-            .rfind('}')
-            .ok_or_else(|| anyhow::anyhow!("Invalid config file format"))?;
-        let json_str = &config_js_content[json_start..=json_end];
-        // Parse the JSON into PortalRuntimeConfig
-        let config: PortalRuntimeConfig = serde_json::from_str(json_str)?;
-        Ok(config)
+impl Default for PortalConfig {
+    fn default() -> Self {
+        PortalConfig {
+            node_type: "hyperchain".to_string(),
+            hyperchains_config: Vec::new(),
+            other: serde_json::Value::Null,
+        }
     }
 }
+
+impl ZkToolboxConfig for PortalConfig {}
diff --git a/zk_toolbox/crates/config/src/traits.rs b/zk_toolbox/crates/config/src/traits.rs
index 1f00b39b040a..bb0722762e31 100644
--- a/zk_toolbox/crates/config/src/traits.rs
+++ b/zk_toolbox/crates/config/src/traits.rs
@@ -5,6 +5,7 @@ use common::files::{
     read_json_file, read_toml_file, read_yaml_file, save_json_file, save_toml_file,
     save_yaml_file,
 };
 use serde::{de::DeserializeOwned, Serialize};
+use url::Url;
 use xshell::Shell;
 
 // Configs that we use only inside zk toolbox, we don't have protobuf implementation for them.
@@ -156,3 +157,7 @@ fn save_with_comment(
     }
     Ok(())
 }
+
+pub trait ConfigWithL2RpcUrl {
+    fn get_l2_rpc_url(&self) -> anyhow::Result<Url>;
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs
index a27b653edf52..d18b05c910e5 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs
@@ -1,9 +1,7 @@
 pub use containers::*;
-pub use portal::*;
 pub use run_server::*;
 pub use update::*;
 
 mod containers;
-mod portal;
 mod run_server;
 mod update;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs
deleted file mode 100644
index e31058aad5d0..000000000000
--- a/zk_toolbox/crates/zk_inception/src/commands/args/portal.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-use clap::Parser;
-use serde::{Deserialize, Serialize};
-
-#[derive(Debug, Serialize, Deserialize, Parser)]
-pub struct PortalArgs {
-    #[clap(
-        long,
-        default_value = "3030",
-        help = "The port number for the portal app"
-    )]
-    pub port: u16,
-}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
index 921eeaa98af8..793fbbf31aee 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
@@ -28,7 +28,7 @@ use crate::{
         genesis::genesis,
         set_token_multiplier_setter::set_token_multiplier_setter,
     },
-    portal::create_and_save_portal_config,
+    portal::update_portal_config,
     },
     consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
     messages::{
@@ -154,7 +154,7 @@ pub async fn init(
         .await
         .context(MSG_GENESIS_DATABASE_ERR)?;
 
-    create_and_save_portal_config(ecosystem_config, shell)
+    update_portal_config(shell, chain_config)
         .await
         .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
 
diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs
index 17c32c04bc2f..81d7970df839 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs
@@ -40,7 +40,7 @@ pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::
 }
 
 fn start_container(shell: &Shell, compose_file: &str, retry_msg: &str) -> anyhow::Result<()> {
-    while let Err(err) = docker::up(shell, compose_file) {
+    while let Err(err) = docker::up(shell, compose_file, true) {
         logger::error(err.to_string());
         if !common::PromptConfirm::new(retry_msg).default(true).ask() {
             return Err(err);
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
index f9940c8a9798..356b5322980f 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs
@@ -15,7 +15,10 @@ use crate::{
     containers::{initialize_docker, start_containers},
     ecosystem::{
         args::create::EcosystemCreateArgs,
-        create_configs::{create_erc20_deployment_config, create_initial_deployments_config},
+        create_configs::{
+            create_apps_config, create_erc20_deployment_config,
+            create_initial_deployments_config,
+        },
     },
     },
     messages::{
@@ -75,6 +78,7 @@ fn create(args: EcosystemCreateArgs, shell: &Shell) -> anyhow::Result<()> {
 
     create_initial_deployments_config(shell, &configs_path)?;
     create_erc20_deployment_config(shell, &configs_path)?;
+    create_apps_config(shell, &configs_path)?;
 
     let ecosystem_config = EcosystemConfig {
         name: ecosystem_name.clone(),
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
index b4f42313e3d0..38358355ff97 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs
@@ -2,7 +2,8 @@ use std::path::Path;
 
 use config::{
     forge_interface::deploy_ecosystem::input::{Erc20DeploymentConfig, InitialDeploymentConfig},
-    traits::SaveConfigWithCommentAndBasePath,
+    traits::{SaveConfigWithBasePath, SaveConfigWithCommentAndBasePath},
+    AppsEcosystemConfig,
 };
 use xshell::Shell;
 
@@ -33,3 +34,12 @@ pub fn create_erc20_deployment_config(
     )?;
     Ok(config)
 }
+
+pub fn create_apps_config(
+    shell: &Shell,
+    ecosystem_configs_path: &Path,
+) -> anyhow::Result<AppsEcosystemConfig> {
+    let config = AppsEcosystemConfig::default();
+    config.save_with_base_path(shell, ecosystem_configs_path)?;
+    Ok(config)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs
new file mode 100644
index 000000000000..6fdd3faa9807
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs
@@ -0,0 +1,39 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, docker};
+use config::{explorer_compose::ExplorerBackendComposeConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::messages::{
+    msg_explorer_chain_not_initialized, MSG_CHAIN_NOT_FOUND_ERR,
+    MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR,
+};
+
+pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    let chain_name = chain_config.name.clone();
+    // Read chain-level explorer backend docker compose file
+    let ecosystem_path = shell.current_dir();
+    let backend_config_path =
+        ExplorerBackendComposeConfig::get_config_path(&ecosystem_path, &chain_config.name);
+    if !backend_config_path.exists() {
+        anyhow::bail!(msg_explorer_chain_not_initialized(&chain_name));
+    }
+    // Run docker compose
+    run_backend(shell, &backend_config_path)?;
+    Ok(())
+}
+
+fn run_backend(shell: &Shell, explorer_compose_config_path: &Path) -> anyhow::Result<()> {
+    if let Some(docker_compose_file) = explorer_compose_config_path.to_str() {
+        docker::up(shell, docker_compose_file, false)
+            .context(MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR)?;
+    } else {
+        anyhow::bail!("Invalid docker compose file");
+    }
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs
new file mode 100644
index 000000000000..43700d91a0df
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs
@@ -0,0 +1,135 @@
+use anyhow::Context;
+use common::{config::global_config, db, logger, Prompt};
+use config::{
+    explorer::{ExplorerChainConfig, ExplorerConfig},
+    explorer_compose::{ExplorerBackendComposeConfig, ExplorerBackendConfig, ExplorerBackendPorts},
+    traits::{ConfigWithL2RpcUrl, SaveConfig},
+    ChainConfig, EcosystemConfig,
+};
+use slugify_rs::slugify;
+use url::Url;
+use xshell::Shell;
+
+use crate::{
+    commands::chain::args::init::PortOffset,
+    consts::L2_BASE_TOKEN_ADDRESS,
+    defaults::{generate_explorer_db_name, DATABASE_EXPLORER_URL},
+    messages::{
+        msg_chain_load_err, msg_explorer_db_name_prompt, msg_explorer_db_url_prompt,
+        msg_explorer_initializing_database_for, MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR,
+        MSG_EXPLORER_INITIALIZED,
+    },
+};
+
+pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    // If specific chain is provided, initialize only that chain; otherwise, initialize all chains
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
+    };
+    // Initialize chains one by one
+    let mut explorer_config = ExplorerConfig::read_or_create_default(shell)?;
+    for chain_name in chains_enabled.iter() {
+        // Load chain config
+        let chain_config = ecosystem_config
+            .load_chain(Some(chain_name.clone()))
+            .context(msg_chain_load_err(chain_name))?;
+        // Build backend config - parameters required to create explorer backend services
+        let backend_config = build_backend_config(&chain_config);
+        // Initialize explorer database
+        initialize_explorer_database(&backend_config.database_url).await?;
+        // Create explorer backend docker compose file
+        let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?;
+        let backend_compose_config =
+            ExplorerBackendComposeConfig::new(chain_name, l2_rpc_url, &backend_config)?;
+        let backend_compose_config_path =
+            ExplorerBackendComposeConfig::get_config_path(&shell.current_dir(), chain_name);
+        backend_compose_config.save(shell, &backend_compose_config_path)?;
+        // Add chain to explorer.json
+        let explorer_chain_config = build_explorer_chain_config(&chain_config, &backend_config)?;
+        explorer_config.add_chain_config(&explorer_chain_config);
+    }
+    // Save explorer config
+    let config_path = ExplorerConfig::get_config_path(&shell.current_dir());
+    explorer_config.save(shell, config_path)?;
+
+    logger::outro(MSG_EXPLORER_INITIALIZED);
+    Ok(())
+}
+
+fn build_backend_config(chain_config: &ChainConfig) -> ExplorerBackendConfig {
+    // Prompt explorer database name
+    logger::info(msg_explorer_initializing_database_for(&chain_config.name));
+    let db_config = fill_database_values_with_prompt(chain_config);
+
+    // Allocate ports for backend services
+    let backend_ports = allocate_explorer_services_ports(chain_config);
+
+    // Build explorer backend config
+    ExplorerBackendConfig::new(db_config.full_url(), &backend_ports)
+}
+
+async fn initialize_explorer_database(db_url: &Url) -> anyhow::Result<()> {
+    let db_config = db::DatabaseConfig::from_url(db_url)?;
+    db::drop_db_if_exists(&db_config)
+        .await
+        .context(MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR)?;
+    db::init_db(&db_config).await?;
+    Ok(())
+}
+
+fn fill_database_values_with_prompt(config: &ChainConfig) -> db::DatabaseConfig {
+    let default_db_name: String = generate_explorer_db_name(config);
+    let chain_name = config.name.clone();
+    let explorer_db_url = Prompt::new(&msg_explorer_db_url_prompt(&chain_name))
+        .default(DATABASE_EXPLORER_URL.as_str())
+        .ask();
+    let explorer_db_name: String = Prompt::new(&msg_explorer_db_name_prompt(&chain_name))
+        .default(&default_db_name)
+        .ask();
+    let explorer_db_name = slugify!(&explorer_db_name, separator = "_");
+    db::DatabaseConfig::new(explorer_db_url, explorer_db_name)
+}
+
+fn allocate_explorer_services_ports(chain_config: &ChainConfig) -> ExplorerBackendPorts {
+    // Try to allocate intuitive ports with an offset from the defaults
+    let offset: u16 = PortOffset::from_chain_id(chain_config.id as u16).into();
+    ExplorerBackendPorts::default().with_offset(offset)
+}
+
+fn build_explorer_chain_config(
+    chain_config: &ChainConfig,
+    backend_config: &ExplorerBackendConfig,
+) -> anyhow::Result<ExplorerChainConfig> {
+    let general_config = chain_config.get_general_config()?;
+    // Get L2 RPC URL from general config
+    let l2_rpc_url = general_config.get_l2_rpc_url()?;
+    // Get Verification API URL from general config
+    let verification_api_url = general_config
+        .contract_verifier
+        .as_ref()
+        .map(|verifier| &verifier.url)
+        .context("verification_url")?;
+    // Build API URL
+    let api_port = backend_config.ports.api_http_port;
+    let api_url = format!("http://127.0.0.1:{}", api_port);
+
+    // Build explorer chain config
+    Ok(ExplorerChainConfig {
+        name: chain_config.name.clone(),
+        l2_network_name: chain_config.name.clone(),
+        l2_chain_id: chain_config.chain_id.as_u64(),
+        rpc_url: l2_rpc_url.to_string(),
+        api_url: api_url.to_string(),
+        base_token_address: L2_BASE_TOKEN_ADDRESS.to_string(),
+        hostnames: Vec::new(),
+        icon: "/images/icons/zksync-arrows.svg".to_string(),
+        maintenance: false,
+        published: true,
+        bridge_url: None,
+        l1_explorer_url: None,
+        verification_api_url: Some(verification_api_url.to_string()),
+        other: serde_json::Value::Null,
+    })
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
new file mode 100644
index 000000000000..4b66d49598c4
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs
@@ -0,0 +1,27 @@
+use clap::Subcommand;
+use xshell::Shell;
+
+mod backend;
+mod init;
+mod run;
+
+#[derive(Subcommand, Debug)]
+pub enum ExplorerCommands {
+    /// Initialize explorer (create database to store explorer data and generate docker
+    /// compose file with explorer services). Runs for all chains, unless --chain is passed
+    Init,
+    /// Start explorer backend services (api, data_fetcher, worker) for a given chain.
+    /// Uses default chain, unless --chain is passed
+    #[command(alias = "backend")]
+    RunBackend,
+    /// Run explorer app
+    Run,
+}
+
+pub(crate) async fn run(shell: &Shell, args: ExplorerCommands) -> anyhow::Result<()> {
+    match args {
+        ExplorerCommands::Init => init::run(shell).await,
+        ExplorerCommands::Run => run::run(shell),
+        ExplorerCommands::RunBackend => backend::run(shell),
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
new file mode 100644
index 000000000000..a6519f62edba
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs
@@ -0,0 +1,98 @@
+use std::path::Path;
+
+use anyhow::Context;
+use common::{config::global_config, docker, logger};
+use config::{explorer::*, traits::SaveConfig, AppsEcosystemConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::{
+    consts::{EXPLORER_APP_DOCKER_CONFIG_PATH, EXPLORER_APP_DOCKER_IMAGE},
+    messages::{
+        msg_explorer_running_with_config, msg_explorer_starting_on,
+        MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR, MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR,
+    },
+};
+
+pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let ecosystem_path = shell.current_dir();
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // If specific_chain is provided, run only with that chain; otherwise, run with all chains
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
+    };
+
+    // Read explorer config
+    let config_path = ExplorerConfig::get_config_path(&ecosystem_path);
+    let mut explorer_config = ExplorerConfig::read_or_create_default(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update explorer config
+    explorer_config.filter(&ecosystem_config.list_of_chains());
+    explorer_config.hide_except(&chains_enabled);
+    if explorer_config.is_empty() {
+        anyhow::bail!(MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR);
+    }
+
+    // Save explorer config
+    explorer_config.save(shell, &config_path)?;
+
+    let config_js_path = explorer_config
+        .save_as_js(shell)
+        .context(MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_explorer_running_with_config(&config_path));
+    logger::info(msg_explorer_starting_on(
+        "127.0.0.1",
+        apps_config.explorer.http_port,
+    ));
+    let name = explorer_app_name(&ecosystem_config.name);
+    run_explorer(
+        shell,
+        &config_js_path,
+        &name,
+        apps_config.explorer.http_port,
+    )?;
+    Ok(())
+}
+
+fn run_explorer(
+    shell: &Shell,
+    config_file_path: &Path,
+    name: &str,
+    port: u16,
+) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        EXPLORER_APP_DOCKER_CONFIG_PATH
+    );
+
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
+
+    docker::run(shell, EXPLORER_APP_DOCKER_IMAGE, docker_args)
+        .with_context(|| MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR)?;
+    Ok(())
+}
+
+/// Generates a name for the explorer app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn explorer_app_name(ecosystem_name: &str) -> String {
+    format!("{}-explorer-app", ecosystem_name)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
index 0ac363beb2da..523faea04786 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs
@@ -3,6 +3,7 @@ pub mod chain;
 pub mod containers;
 pub mod contract_verifier;
 pub mod ecosystem;
+pub mod explorer;
 pub mod external_node;
 pub mod portal;
 pub mod prover;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
index cc939f3fb3ea..5bf211211779 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/portal.rs
@@ -1,33 +1,30 @@
-use std::{collections::HashMap, path::Path};
+use std::path::Path;
 
-use anyhow::{anyhow, Context};
-use common::{docker, ethereum, logger};
+use anyhow::Context;
+use common::{config::global_config, docker, ethereum, logger};
 use config::{
     portal::*,
-    traits::{ReadConfig, SaveConfig},
-    ChainConfig, EcosystemConfig,
+    traits::{ConfigWithL2RpcUrl, SaveConfig},
+    AppsEcosystemConfig, ChainConfig, EcosystemConfig,
 };
 use ethers::types::Address;
 use types::{BaseToken, TokenInfo};
 use xshell::Shell;
 
 use crate::{
-    commands::args::PortalArgs,
-    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONTAINER_PORT, PORTAL_DOCKER_IMAGE},
+    consts::{L2_BASE_TOKEN_ADDRESS, PORTAL_DOCKER_CONFIG_PATH, PORTAL_DOCKER_IMAGE},
     messages::{
-        msg_portal_starting_on, MSG_PORTAL_CONFIG_IS_EMPTY_ERR,
-        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
+        msg_portal_running_with_config, msg_portal_starting_on,
+        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR,
+        MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR,
     },
 };
 
-async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<HyperchainConfig> {
+async fn build_portal_chain_config(
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalChainConfig> {
     // Get L2 RPC URL from general config
-    let general_config = chain_config.get_general_config()?;
-    let rpc_url = general_config
-        .api_config
-        .as_ref()
-        .map(|api_config| &api_config.web3_json_rpc.http_url)
-        .context("api_config")?;
+    let l2_rpc_url = chain_config.get_general_config()?.get_l2_rpc_url()?;
     // Get L1 RPC URL from secrets config
     let secrets_config = chain_config.get_secrets_config()?;
     let l1_rpc_url = secrets_config
@@ -68,97 +65,126 @@ async fn create_hyperchain_config(chain_config: &ChainConfig) -> anyhow::Result<HyperchainConfig>
         name: Some(base_token_info.name.to_string()),
     }];
     // Build hyperchain config
-    Ok(HyperchainConfig {
+    Ok(PortalChainConfig {
         network: NetworkConfig {
             id: chain_config.chain_id.as_u64(),
             key: chain_config.name.clone(),
             name: chain_config.name.clone(),
-            rpc_url: rpc_url.to_string(),
+            rpc_url: l2_rpc_url.to_string(),
             l1_network,
             public_l1_network_id: None,
             block_explorer_url: None,
             block_explorer_api: None,
+            hidden: None,
+            other: serde_json::Value::Null,
         },
         tokens,
     })
 }
 
-async fn create_hyperchains_config(
-    chain_configs: &[ChainConfig],
-) -> anyhow::Result<HyperchainsConfig> {
-    let mut hyperchain_configs = Vec::new();
-    for chain_config in chain_configs {
-        if let Ok(config) = create_hyperchain_config(chain_config).await {
-            hyperchain_configs.push(config)
-        }
-    }
-    Ok(HyperchainsConfig(hyperchain_configs))
+pub async fn update_portal_config(
+    shell: &Shell,
+    chain_config: &ChainConfig,
+) -> anyhow::Result<PortalConfig> {
+    // Build and append portal chain config to the portal config
+    let portal_chain_config = build_portal_chain_config(chain_config).await?;
+    let mut portal_config = PortalConfig::read_or_create_default(shell)?;
+    portal_config.add_chain_config(&portal_chain_config);
+    // Save portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    portal_config.save(shell, config_path)?;
+    Ok(portal_config)
 }
 
-pub async fn create_portal_config(
+/// Validates portal config - appends missing chains and removes unknown chains
+async fn validate_portal_config(
+    portal_config: &mut PortalConfig,
     ecosystem_config: &EcosystemConfig,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let chains: Vec<String> = ecosystem_config.list_of_chains();
-    let mut chain_configs = Vec::new();
-    for chain in chains {
-        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain.clone())) {
-            chain_configs.push(chain_config)
+) -> anyhow::Result<()> {
+    let chain_names = ecosystem_config.list_of_chains();
+    for chain_name in &chain_names {
+        if portal_config.contains(chain_name) {
+            continue;
+        }
+        // Append missing chain, chain might not be initialized, so ignoring errors
+        if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) {
+            if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await {
+                portal_config.add_chain_config(&portal_chain_config);
+            }
         }
     }
-    let hyperchains_config = create_hyperchains_config(&chain_configs).await?;
-    if hyperchains_config.is_empty() {
-        anyhow::bail!("Failed to create any valid hyperchain config")
-    }
-    let runtime_config = PortalRuntimeConfig {
-        node_type: "hyperchain".to_string(),
-        hyperchains_config,
-    };
-    Ok(runtime_config)
-}
-
-pub async fn create_and_save_portal_config(
-    ecosystem_config: &EcosystemConfig,
-    shell: &Shell,
-) -> anyhow::Result<PortalRuntimeConfig> {
-    let portal_config = create_portal_config(ecosystem_config).await?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    portal_config.save(shell, config_path)?;
-    Ok(portal_config)
+    portal_config.filter(&chain_names);
+    Ok(())
 }
 
-pub async fn run(shell: &Shell, args: PortalArgs) -> anyhow::Result<()> {
+pub async fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config: EcosystemConfig = EcosystemConfig::from_file(shell)?;
-    let config_path = PortalRuntimeConfig::get_config_path(&shell.current_dir());
-    logger::info(format!(
-        "Using portal config file at {}",
-        config_path.display()
-    ));
-
-    let portal_config = match PortalRuntimeConfig::read(shell, &config_path) {
-        Ok(config) => config,
-        Err(_) => create_and_save_portal_config(&ecosystem_config, shell)
-            .await
-            .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?,
+    // Get ecosystem level apps.yaml config
+    let apps_config = AppsEcosystemConfig::read_or_create_default(shell)?;
+    // Display all chains, unless --chain is passed
+    let chains_enabled = match global_config().chain_name {
+        Some(ref chain_name) => vec![chain_name.clone()],
+        None => ecosystem_config.list_of_chains(),
     };
-    if portal_config.hyperchains_config.is_empty() {
-        return Err(anyhow!(MSG_PORTAL_CONFIG_IS_EMPTY_ERR));
+
+    // Read portal config
+    let config_path = PortalConfig::get_config_path(&shell.current_dir());
+    let mut portal_config = PortalConfig::read_or_create_default(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    // Validate and update portal config
+    validate_portal_config(&mut portal_config, &ecosystem_config).await?;
+    portal_config.hide_except(&chains_enabled);
+    if portal_config.is_empty() {
+        anyhow::bail!(MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR);
     }
 
-    logger::info(msg_portal_starting_on("127.0.0.1", args.port));
-    run_portal(shell, &config_path, args.port)?;
+    // Save portal config
+    portal_config.save(shell, &config_path)?;
+
+    let config_js_path = portal_config
+        .save_as_js(shell)
+        .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?;
+
+    logger::info(msg_portal_running_with_config(&config_path));
+    logger::info(msg_portal_starting_on(
+        "127.0.0.1",
+        apps_config.portal.http_port,
+    ));
+    let name = portal_app_name(&ecosystem_config.name);
+    run_portal(shell, &config_js_path, &name, apps_config.portal.http_port)?;
     Ok(())
 }
 
-fn run_portal(shell: &Shell, config_file_path: &Path, port: u16) -> anyhow::Result<()> {
-    let port_mapping = format!("{}:{}", port, PORTAL_DOCKER_CONTAINER_PORT);
-    let volume_mapping = format!("{}:/usr/src/app/dist/config.js", config_file_path.display());
+fn run_portal(shell: &Shell, config_file_path: &Path, name: &str, port: u16) -> anyhow::Result<()> {
+    let port_mapping = format!("{}:{}", port, port);
+    let volume_mapping = format!(
+        "{}:{}",
+        config_file_path.display(),
+        PORTAL_DOCKER_CONFIG_PATH
+    );
 
-    let mut docker_args: HashMap<String, String> = HashMap::new();
-    docker_args.insert("--platform".to_string(), "linux/amd64".to_string());
-    docker_args.insert("-p".to_string(), port_mapping);
-    docker_args.insert("-v".to_string(), volume_mapping);
+    let docker_args: Vec<String> = vec![
+        "--platform".to_string(),
+        "linux/amd64".to_string(),
+        "--name".to_string(),
+        name.to_string(),
+        "-p".to_string(),
+        port_mapping,
+        "-v".to_string(),
+        volume_mapping,
+        "-e".to_string(),
+        format!("PORT={}", port),
+        "--rm".to_string(),
+    ];
 
     docker::run(shell, PORTAL_DOCKER_IMAGE, docker_args)
         .with_context(|| MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR)?;
     Ok(())
 }
+
+/// Generates a name for the portal app Docker container.
+/// Will be passed as `--name` argument to `docker run`.
+fn portal_app_name(ecosystem_name: &str) -> String {
+    format!("{}-portal-app", ecosystem_name)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs
index 7463dc28570e..7db976c61033 100644
--- a/zk_toolbox/crates/zk_inception/src/consts.rs
+++ b/zk_toolbox/crates/zk_inception/src/consts.rs
@@ -8,5 +8,10 @@ pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default
 pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts";
 pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda";
 pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A";
+
+/// Path to the JS runtime config for the block-explorer-app docker container to be mounted to
+pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js";
+pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app";
+/// Path to the JS runtime config for the dapp-portal docker container to be mounted to
+pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js";
 pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal";
-pub const PORTAL_DOCKER_CONTAINER_PORT: u16 = 3000;
diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs
index 34b0eeae4195..544e28377403 100644
--- a/zk_toolbox/crates/zk_inception/src/defaults.rs
+++ b/zk_toolbox/crates/zk_inception/src/defaults.rs
@@ -7,6 +7,8 @@ lazy_static!
{ Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); pub static ref DATABASE_PROVER_URL: Url = Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); + pub static ref DATABASE_EXPLORER_URL: Url = + Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap(); } pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper"; @@ -40,6 +42,14 @@ pub fn generate_db_names(config: &ChainConfig) -> DBNames { } } +pub fn generate_explorer_db_name(config: &ChainConfig) -> String { + format!( + "zksync_explorer_{}_{}", + config.l1_network.to_string().to_ascii_lowercase(), + config.name + ) +} + pub fn generate_external_node_db_name(config: &ChainConfig) -> String { format!( "external_node_{}_{}", diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index cb1b5388196a..f6f7d83dede6 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -13,11 +13,8 @@ use config::EcosystemConfig; use xshell::Shell; use crate::commands::{ - args::{PortalArgs, RunServerArgs}, - chain::ChainCommands, - ecosystem::EcosystemCommands, - external_node::ExternalNodeCommands, - prover::ProverCommands, + args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands, + explorer::ExplorerCommands, external_node::ExternalNodeCommands, prover::ProverCommands, }; pub mod accept_ownership; @@ -60,7 +57,10 @@ pub enum InceptionSubcommands { #[command(subcommand)] ContractVerifier(ContractVerifierCommands), /// Run dapp-portal - Portal(PortalArgs), + Portal, + /// Run block-explorer + #[command(subcommand)] + Explorer(ExplorerCommands), /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), @@ -123,7 +123,8 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Res InceptionSubcommands::ContractVerifier(args) => { commands::contract_verifier::run(shell, args).await? 
         }
-        InceptionSubcommands::Portal(args) => commands::portal::run(shell, args).await?,
+        InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?,
+        InceptionSubcommands::Portal => commands::portal::run(shell).await?,
         InceptionSubcommands::Update(args) => commands::update::run(shell, args)?,
         InceptionSubcommands::Markdown => {
             clap_markdown::print_help_markdown::<Inception>();
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index 25933d39db30..cca3e3b549b1 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -117,6 +117,9 @@ pub(super) fn msg_chain_doesnt_exist_err(chain_name: &str, chains: &Vec<String>)
         chain_name, chains
     )
 }
+pub(super) fn msg_chain_load_err(chain_name: &str) -> String {
+    format!("Failed to load chain config for {chain_name}")
+}
 
 /// Chain create related messages
 pub(super) const MSG_PROVER_MODE_HELP: &str = "Prover options";
@@ -199,6 +202,14 @@ pub(super) fn msg_server_db_name_prompt(chain_name: &str) -> String {
     format!("Please provide server database name for chain {chain_name}")
 }
 
+pub(super) fn msg_explorer_db_url_prompt(chain_name: &str) -> String {
+    format!("Please provide explorer database url for chain {chain_name}")
+}
+
+pub(super) fn msg_explorer_db_name_prompt(chain_name: &str) -> String {
+    format!("Please provide explorer database name for chain {chain_name}")
+}
+
 /// Chain initialize bridges related messages
 pub(super) const MSG_DEPLOYING_L2_CONTRACT_SPINNER: &str = "Deploying l2 contracts";
 
@@ -231,14 +242,46 @@ pub(super) const MSG_FAILED_TO_RUN_SERVER_ERR: &str = "Failed to start server";
 pub(super) const MSG_PREPARING_EN_CONFIGS: &str = "Preparing External Node config";
 
 /// Portal related messages
-pub(super) const MSG_PORTAL_CONFIG_IS_EMPTY_ERR: &str = "Hyperchains config is empty";
+pub(super) const MSG_PORTAL_FAILED_TO_FIND_ANY_CHAIN_ERR: &str =
+    "Failed to find any valid chain to run portal for";
 pub(super) const MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create portal config";
 pub(super) const MSG_PORTAL_FAILED_TO_RUN_DOCKER_ERR: &str =
    "Failed to run portal docker container";
+pub(super) fn msg_portal_running_with_config(path: &Path) -> String {
+    format!("Running portal with configuration from: {}", path.display())
+}
 pub(super) fn msg_portal_starting_on(host: &str, port: u16) -> String {
     format!("Starting portal on http://{host}:{port}")
 }
 
+/// Explorer related messages
+pub(super) const MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR: &str =
+    "Failed to drop explorer database";
+pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_SERVICES_ERR: &str =
+    "Failed to run docker compose with explorer services";
+pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str =
+    "Failed to run explorer docker container";
+pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str =
+    "Failed to create explorer config";
+pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str =
+    "Failed to find any valid chain to run explorer for.
Did you run `zk_inception explorer init`?"; +pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; +pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { + format!("Initializing explorer database for {chain} chain") +} +pub(super) fn msg_explorer_running_with_config(path: &Path) -> String { + format!( + "Running explorer with configuration from: {}", + path.display() + ) +} +pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { + format!("Starting explorer on http://{host}:{port}") +} +pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { + format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") +} + /// Forge utils related messages pub(super) const MSG_DEPLOYER_PK_NOT_SET_ERR: &str = "Deployer private key is not set"; From cab13b8d36fbcd7a49073904f1d04bfc36e81645 Mon Sep 17 00:00:00 2001 From: Akosh Farkash Date: Fri, 6 Sep 2024 09:19:01 +0100 Subject: [PATCH 033/116] feat(zk_toolbox): Deploy ConsensusRegistry (BFT-504) (#2713) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds a `zk_inception chain deploy-consensus-registry` command. TODO: - [x] Change `contracts` submodule back to `main` once https://github.com/matter-labs/era-contracts/pull/735 is merged ### Contract Owner The agreement was that on testnet the `ConsensusRegistry` contract should be owned by the governor account, which is 0xD64e136566a9E04eb05B30184fF577F52682D182, while on mainnet it should be owned by the [developer multisig account](https://app.safe.global/transactions/queue?safe=eth:0x9e543149DdfEEE18e95A4655D07096398Dd2Bf52). The owner is set in [DeployL2ContractsInput::consensus_registry_owner](https://github.com/matter-labs/zksync-era/blob/f4b7c12431d4bb063c735947f74e30c749119b5f/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs#L19) which has access to contract and wallet configuration and these are written to a config file just before deployment. ~~I added an optional `developer_multisig` wallet to `WalletConfig`, so the address can be added at the same place as the `governor` address is; if `developer_multisig` is missing then `governor` is used. I suppose it could be made part of the `ContractsConfig` instead, but since this is a wallet with funds that developers can access, I thought it wouldn't be out of place in `wallets.yaml` even if one doesn't have any of the corresponding private keys. Let me know if I should be using something else.~~ ### Testing Since the `zk_toolbox` is replacing the `zk` commands, and `zk init` doesn't deploy the consensus registry, we have to use the following commands to see that the contract is built, deployed and its address is written to the config file: ```shell ./bin/zkt zk_inception ecosystem create zk_inception containers zk_inception ecosystem init --dev ``` After this we can check if we see the address in the generated config file: ```console ❯ cat ./chains/era/configs/contracts.yaml | yq .l2.consensus_registry 0x72ada8c211f45e768c9a7781793da84daf1d0d1b ``` Finally clean up: ```shell zk_supervisor clean all ``` ## Why ❔ So that we can deploy the L2 consensus registry contract using the `zk_toolbox`. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. 
- [x] Code has been formatted via `zk fmt` and `zk lint`.

---------

Co-authored-by: Grzegorz Prusak
---
 contracts                                     |   2 +-
 zk_toolbox/crates/config/src/contracts.rs     |  13 +-
 .../deploy_l2_contracts/input.rs              |   4 +
 .../deploy_l2_contracts/output.rs             |   8 +-
 .../src/commands/chain/deploy_l2_contracts.rs | 113 ++++++++++++------
 .../zk_inception/src/commands/chain/mod.rs    |   8 +-
 6 files changed, 106 insertions(+), 42 deletions(-)

diff --git a/contracts b/contracts
index fd4aebcfe883..d3687694f71d 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit fd4aebcfe8833b26e096e87e142a5e7e4744f3fa
+Subproject commit d3687694f71d83fa286b9c186b4c3ea173028f83
diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs
index 6042c4bea088..19d432909487 100644
--- a/zk_toolbox/crates/config/src/contracts.rs
+++ b/zk_toolbox/crates/config/src/contracts.rs
@@ -5,7 +5,9 @@ use crate::{
     consts::CONTRACTS_FILE,
     forge_interface::{
         deploy_ecosystem::output::DeployL1Output,
-        deploy_l2_contracts::output::{DefaultL2UpgradeOutput, InitializeBridgeOutput},
+        deploy_l2_contracts::output::{
+            ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput,
+        },
         register_chain::output::RegisterChainOutput,
     },
     traits::{FileConfigWithDefaultName, ZkToolboxConfig},
@@ -84,6 +86,14 @@ impl ContractsConfig {
         Ok(())
     }
 
+    pub fn set_consensus_registry(
+        &mut self,
+        consensus_registry_output: &ConsensusRegistryOutput,
+    ) -> anyhow::Result<()> {
+        self.l2.consensus_registry = Some(consensus_registry_output.consensus_registry_proxy);
+        Ok(())
+    }
+
     pub fn set_default_l2_upgrade(
         &mut self,
         default_upgrade_output: &DefaultL2UpgradeOutput,
@@ -140,4 +150,5 @@ pub struct L1Contracts {
 pub struct L2Contracts {
     pub testnet_paymaster_addr: Address,
     pub default_l2_upgrader: Address,
+    pub consensus_registry: Option<Address>
, } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index f48fd0ba2b5e..b20b58f99c58 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -6,6 +6,8 @@ use crate::{traits::ZkToolboxConfig, ChainConfig}; impl ZkToolboxConfig for DeployL2ContractsInput {} +/// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` +/// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DeployL2ContractsInput { pub era_chain_id: L2ChainId, @@ -14,6 +16,7 @@ pub struct DeployL2ContractsInput { pub bridgehub: Address, pub governance: Address, pub erc20_bridge: Address, + pub consensus_registry_owner: Address, } impl DeployL2ContractsInput { @@ -27,6 +30,7 @@ impl DeployL2ContractsInput { bridgehub: contracts.ecosystem_contracts.bridgehub_proxy_addr, governance: wallets.governor.address, erc20_bridge: contracts.bridges.erc20.l1_address, + consensus_registry_owner: wallets.governor.address, }) } } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 22f3dc9381b3..860e7e293f99 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -4,8 +4,8 @@ use serde::{Deserialize, Serialize}; use crate::traits::ZkToolboxConfig; impl ZkToolboxConfig for InitializeBridgeOutput {} - impl ZkToolboxConfig for DefaultL2UpgradeOutput {} +impl ZkToolboxConfig for ConsensusRegistryOutput {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { @@ -17,3 +17,9 @@ pub struct InitializeBridgeOutput { pub struct DefaultL2UpgradeOutput { pub l2_default_upgrader: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusRegistryOutput { + pub consensus_registry_implementation: Address, + pub consensus_registry_proxy: Address, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 30f361e44af2..3625abfb15a9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -11,7 +11,7 @@ use config::{ forge_interface::{ deploy_l2_contracts::{ input::DeployL2ContractsInput, - output::{DefaultL2UpgradeOutput, InitializeBridgeOutput}, + output::{ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput}, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, }, @@ -31,7 +31,8 @@ use crate::{ pub enum Deploy2ContractsOption { All, Upgrader, - IntiailizeBridges, + InitiailizeBridges, + ConsensusRegistry, } pub async fn run( @@ -70,7 +71,17 @@ pub async fn run( ) .await?; } - Deploy2ContractsOption::IntiailizeBridges => { + Deploy2ContractsOption::ConsensusRegistry => { + deploy_consensus_registry( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } + Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, &chain_config, @@ -88,6 +99,25 @@ pub async fn run( Ok(()) } +/// Build the L2 contracts, deploy one or all of 
them with `forge`, then update the config +/// by reading one or all outputs written by the deploy scripts. +async fn build_and_deploy( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + forge_args: ForgeScriptArgs, + signature: Option<&str>, + mut update_config: impl FnMut(&Shell, &Path) -> anyhow::Result<()>, +) -> anyhow::Result<()> { + build_l2_contracts(shell, &ecosystem_config.link_to_code)?; + call_forge(shell, chain_config, ecosystem_config, forge_args, signature).await?; + update_config( + shell, + &DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + Ok(()) +} + pub async fn initialize_bridges( shell: &Shell, chain_config: &ChainConfig, @@ -95,22 +125,17 @@ pub async fn initialize_bridges( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge( + build_and_deploy( shell, chain_config, ecosystem_config, forge_args, Some("runDeploySharedBridge"), + |shell, out| { + contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?) + }, ) - .await?; - let output = InitializeBridgeOutput::read( - shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_l2_shared_bridge(&output)?; - Ok(()) + .await } pub async fn deploy_upgrader( @@ -120,48 +145,60 @@ pub async fn deploy_upgrader( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge( + build_and_deploy( shell, chain_config, ecosystem_config, forge_args, Some("runDefaultUpgrader"), + |shell, out| { + contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?) + }, ) - .await?; - let output = DefaultL2UpgradeOutput::read( - shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_default_l2_upgrade(&output)?; - Ok(()) + .await } -pub async fn deploy_l2_contracts( +pub async fn deploy_consensus_registry( shell: &Shell, chain_config: &ChainConfig, ecosystem_config: &EcosystemConfig, contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { - build_l2_contracts(shell, &ecosystem_config.link_to_code)?; - call_forge(shell, chain_config, ecosystem_config, forge_args, None).await?; - let output = InitializeBridgeOutput::read( + build_and_deploy( shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_l2_shared_bridge(&output)?; + chain_config, + ecosystem_config, + forge_args, + Some("runDeployConsensusRegistry"), + |shell, out| { + contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?) 
+ }, + ) + .await +} - let output = DefaultL2UpgradeOutput::read( +pub async fn deploy_l2_contracts( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( shell, - DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - - contracts_config.set_default_l2_upgrade(&output)?; - - Ok(()) + chain_config, + ecosystem_config, + forge_args, + None, + |shell, out| { + contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?; + contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; + contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; + Ok(()) + }, + ) + .await } async fn call_forge( diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index dbddc923336a..afc92d2288bf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -31,6 +31,9 @@ pub enum ChainCommands { /// Deploy all l2 contracts #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Deploy L2 consensus registry + #[command(alias = "consensus")] + DeployConsensusRegistry(ForgeScriptArgs), /// Deploy Default Upgrader Upgrader(ForgeScriptArgs), /// Deploy paymaster smart contract @@ -48,11 +51,14 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<() ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::DeployConsensusRegistry(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await + } ChainCommands::Upgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { - deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::IntiailizeBridges).await + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::InitiailizeBridges).await } ChainCommands::DeployPaymaster(args) => deploy_paymaster::run(args, shell).await, ChainCommands::UpdateTokenMultiplierSetter(args) => { From 6db091e0ea3e7c13dd06cc383c3e930180b870fc Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Fri, 6 Sep 2024 12:21:42 +0400 Subject: [PATCH 034/116] chore(ci): Fix cargo deny check and make output readable (#2814) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Allow new advisory (low impact; hard to fix). - Omit printing the tree in output; because with it the output is not readable. ## Why ❔ - Unblock CI - Make cargo deny CI usable. 
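
For anyone who wants to reproduce the check locally, a rough equivalent of what CI now runs (assuming `cargo-deny` is installed) is:

```shell
cargo deny check --hide-inclusion-graph
```

The `--hide-inclusion-graph` flag mirrors the `command-arguments` passed to the action below.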
--- .github/workflows/cargo-license.yaml | 3 +++ deny.toml | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cargo-license.yaml b/.github/workflows/cargo-license.yaml index b1909fc75039..72eb8d0d865b 100644 --- a/.github/workflows/cargo-license.yaml +++ b/.github/workflows/cargo-license.yaml @@ -6,3 +6,6 @@ jobs: steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - uses: EmbarkStudios/cargo-deny-action@8371184bd11e21dcf8ac82ebf8c9c9f74ebf7268 # v2.0.1 + with: + command: check + command-arguments: "--hide-inclusion-graph" diff --git a/deny.toml b/deny.toml index aadb868aa394..b840ec5176e8 100644 --- a/deny.toml +++ b/deny.toml @@ -12,10 +12,10 @@ ignore = [ "RUSTSEC-2022-0041", # crossbeam-utils vulnerability, dependency coming from bellman_ce "RUSTSEC-2024-0320", # yaml_rust dependency being unmaintained, dependency in core, we should consider moving to yaml_rust2 fork "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork + "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error` # all below caused by StructOpt which we still use and we should move to clap v3 instead "RUSTSEC-2021-0145", "RUSTSEC-2021-0139", - ] [licenses] @@ -51,7 +51,7 @@ ignore = false registries = [] [bans] -multiple-versions = "warn" +multiple-versions = "allow" wildcards = "allow" highlight = "all" workspace-default-features = "allow" From 64f95514c99f95da2a19a97ff064c29a97efc22f Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Fri, 6 Sep 2024 12:45:59 +0400 Subject: [PATCH 035/116] feat: (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash (#2809) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ We have a configuration field `recursion_scheduler_level_vk_hash` which actually stores `snark_wrapper_vk_hash` inside. It happened because an old config value was utilized for the new purpose some time ago. This PR changes the name of field in a non-breaking way: - `serde` (de)serialization happens with both `alias` and `rename(serialize = "..")`, so that we serialize the field the same way as before, but can deserialize either way. This is used for env configs and API. - `protobuf` deserialization is done by introducing a new field, and reading whatever one is available. - `protobuf` serialization always produced the _new_ field, so newly generated configs should have new field name. - ~~⚠️ DB column names was left as-is, because renaming DB columns is not a trivial process.~~ - Upd: Migration was added. It copies the old column to the new one and switches to the new one right away. ## Why ❔ Having incorrect name that doesn't represent the value stored is confusing and can lead to errors. 
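
As a quick illustration of the serde part, here is a minimal sketch of the alias + rename pattern (illustrative only; the struct is simplified and the field is a `String` here, the real change to `L1VerifierConfig` and `GenesisConfig` is in the diff below):

```rust
use serde::{Deserialize, Serialize};

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct VerifierConfig {
    // Deserializes from either the old or the new name, but always
    // serializes under the old name, so existing clients keep working.
    #[serde(
        alias = "recursion_scheduler_level_vk_hash",
        rename(serialize = "recursion_scheduler_level_vk_hash")
    )]
    snark_wrapper_vk_hash: String,
}

fn main() {
    let old = r#"{"recursion_scheduler_level_vk_hash":"0x11"}"#;
    let new = r#"{"snark_wrapper_vk_hash":"0x11"}"#;
    // Both spellings deserialize into the same field.
    let a: VerifierConfig = serde_json::from_str(old).unwrap();
    let b: VerifierConfig = serde_json::from_str(new).unwrap();
    assert_eq!(a, b);
    // Serialization still emits the backward-compatible name.
    assert_eq!(
        serde_json::to_string(&a).unwrap(),
        r#"{"recursion_scheduler_level_vk_hash":"0x11"}"#
    );
}
```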
--- Cargo.lock | 1 + core/lib/basic_types/src/protocol_version.rs | 25 ++++++++++- core/lib/config/Cargo.toml | 3 ++ core/lib/config/src/configs/genesis.rs | 45 ++++++++++++++++++- core/lib/config/src/testonly.rs | 2 +- ...24b83027a8e050598b0cd4cfeb75e7fe89fdd.json | 16 ------- ...b3c0210383d8698f6f84f694fece9fd59f3d5.json | 16 +++++++ ...2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json} | 6 +-- ...e7ed268bb6c5f3465c7e64beaa226c066f2b.json} | 4 +- ...d36e6c9d9e70dc52677c6b335b3ed4025db85.json | 23 ---------- ...298ed3fc5e6eb1c78c285bd20f6401771df25.json | 23 ++++++++++ ...ecurision-scheduler-level-vk-hash.down.sql | 3 ++ ...-recurision-scheduler-level-vk-hash.up.sql | 8 ++++ .../src/models/storage_protocol_version.rs | 6 +-- core/lib/dal/src/protocol_versions_dal.rs | 20 ++++----- core/lib/env_config/src/genesis.rs | 2 +- core/lib/protobuf_config/src/genesis.rs | 17 ++++--- .../src/proto/config/genesis.proto | 3 +- core/lib/types/src/protocol_upgrade.rs | 6 +-- .../node/api_server/src/web3/namespaces/en.rs | 2 +- core/node/eth_sender/src/aggregator.rs | 7 +-- core/node/eth_sender/src/eth_tx_aggregator.rs | 8 ++-- core/node/genesis/src/lib.rs | 9 ++-- .../prover_cli/src/commands/insert_version.rs | 4 +- .../bin/prover_cli/src/commands/status/l1.rs | 6 +-- .../crates/bin/witness_generator/src/main.rs | 3 +- ...8c05583e8415d2e1d8c503f640e77d282b0d5.json | 23 ++++++++++ ...1fc79400930dddc84e042c5a4dc8a2e8508a5.json | 23 ---------- ...52e85f85202637916cfcf4b34c6780536f105.json | 16 ------- ...52aeb5f06c26f68d131dd242f6ed68816c513.json | 22 --------- ...c23ff743fc01c92e28ed447a8e124062fa62c.json | 20 +++++++++ ...f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json | 16 +++++++ ...7999388451886a3eb9b4481b55404b16b89ac.json | 20 --------- ...ecurision-scheduler-level-vk-hash.down.sql | 3 ++ ...-recurision-scheduler-level-vk-hash.up.sql | 8 ++++ .../src/fri_protocol_versions_dal.rs | 16 +++---- 36 files changed, 246 insertions(+), 189 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json create mode 100644 core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json rename core/lib/dal/.sqlx/{query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json => query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json} (70%) rename core/lib/dal/.sqlx/{query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json => query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json} (64%) delete mode 100644 core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json create mode 100644 core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json create mode 100644 core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql create mode 100644 core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json create mode 100644 
prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json delete mode 100644 prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json create mode 100644 prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql create mode 100644 prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql diff --git a/Cargo.lock b/Cargo.lock index accd6b344486..2d6263f7ab4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8213,6 +8213,7 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "serde_json", "tracing", "url", "zksync_basic_types", diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 265c06987afd..640a92c00da0 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -238,7 +238,12 @@ impl Detokenize for VerifierParams { #[derive(Debug, Clone, Copy, Default, Eq, PartialEq, Serialize, Deserialize)] pub struct L1VerifierConfig { - pub recursion_scheduler_level_vk_hash: H256, + // Rename is required to not introduce breaking changes in the API for existing clients. + #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, } impl From for VmVersion { @@ -394,4 +399,22 @@ mod tests { assert_eq!(version, unpacked); } + + #[test] + fn test_verifier_config_serde() { + let de = [ + r#"{"recursion_scheduler_level_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + r#"{"snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111"}"#, + ]; + for de in de.iter() { + let _: L1VerifierConfig = serde_json::from_str(de) + .unwrap_or_else(|err| panic!("Failed deserialization. String: {de}, error {err}")); + } + let ser = L1VerifierConfig { + snark_wrapper_vk_hash: H256::repeat_byte(0x11), + }; + let ser_str = serde_json::to_string(&ser).unwrap(); + let expected_str = r#"{"recursion_scheduler_level_vk_hash":"0x1111111111111111111111111111111111111111111111111111111111111111"}"#; + assert_eq!(ser_str, expected_str); + } } diff --git a/core/lib/config/Cargo.toml b/core/lib/config/Cargo.toml index b13948448cdd..d1ab5ce8438f 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -24,6 +24,9 @@ rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +[dev-dependencies] +serde_json.workspace = true + [features] default = [] observability_ext = ["zksync_vlog", "tracing"] diff --git a/core/lib/config/src/configs/genesis.rs b/core/lib/config/src/configs/genesis.rs index 2c5c91128431..6c4bacc3a6e2 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -20,7 +20,14 @@ pub struct GenesisConfig { pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, - pub recursion_scheduler_level_vk_hash: H256, + // Note: `serde` isn't used with protobuf config. The same alias is implemented in + // `zksync_protobuf_config` manually. + // Rename is required to not introduce breaking changes in the API for existing clients. 
+ #[serde( + alias = "recursion_scheduler_level_vk_hash", + rename(serialize = "recursion_scheduler_level_vk_hash") + )] + pub snark_wrapper_vk_hash: H256, pub fee_account: Address, pub dummy_verifier: bool, pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -37,7 +44,7 @@ impl GenesisConfig { GenesisConfig { genesis_root_hash: Some(H256::repeat_byte(0x01)), rollup_last_leaf_index: Some(26), - recursion_scheduler_level_vk_hash: H256::repeat_byte(0x02), + snark_wrapper_vk_hash: H256::repeat_byte(0x02), fee_account: Default::default(), genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), @@ -54,3 +61,37 @@ impl GenesisConfig { } } } + +#[cfg(test)] +mod tests { + use super::GenesisConfig; + + // This test checks that serde overrides (`rename`, `alias`) work for `snark_wrapper_vk_hash` field. + #[test] + fn genesis_serde_snark_wrapper_vk_hash() { + let genesis = GenesisConfig::for_tests(); + let genesis_str = serde_json::to_string(&genesis).unwrap(); + + // Check that we use backward-compatible name in serialization. + // If you want to remove this check, make sure that all the potential clients are updated. + assert!( + genesis_str.contains("recursion_scheduler_level_vk_hash"), + "Serialization should use backward-compatible name" + ); + + let genesis2: GenesisConfig = serde_json::from_str(&genesis_str).unwrap(); + assert_eq!(genesis, genesis2); + + let genesis_json = r#"{ + "snark_wrapper_vk_hash": "0x1111111111111111111111111111111111111111111111111111111111111111", + "l1_chain_id": 1, + "l2_chain_id": 1, + "fee_account": "0x1111111111111111111111111111111111111111", + "dummy_verifier": false, + "l1_batch_commit_data_generator_mode": "Rollup" + }"#; + serde_json::from_str::(genesis_json).unwrap_or_else(|err| { + panic!("Failed to parse genesis config with a new name: {}", err) + }); + } +} diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index ea27bf8ab3ab..028b5e38055f 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -728,7 +728,7 @@ impl Distribution for EncodeDist { l1_chain_id: L1ChainId(self.sample(rng)), sl_chain_id: None, l2_chain_id: L2ChainId::default(), - recursion_scheduler_level_vk_hash: rng.gen(), + snark_wrapper_vk_hash: rng.gen(), dummy_verifier: rng.gen(), l1_batch_commit_data_generator_mode: match rng.gen_range(0..2) { 0 => L1BatchCommitmentMode::Rollup, diff --git a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json b/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json deleted file mode 100644 index 5e10786c7e3f..000000000000 --- a/core/lib/dal/.sqlx/query-0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Int4", - "Bytea" - ] - }, - "nullable": [] - }, - "hash": "0d421637db03b83aa33468b7d3424b83027a8e050598b0cd4cfeb75e7fe89fdd" -} diff --git a/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json new file mode 100644 index 000000000000..5652e186ceb9 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at)\n VALUES\n ($1, $2, $3, NOW())\n ON CONFLICT DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Bytea" + ] + }, + "nullable": [] + }, + "hash": "3ddb13ffbafae2374527886e592b3c0210383d8698f6f84f694fece9fd59f3d5" +} diff --git a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json similarity index 70% rename from core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json rename to core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json index 68b595b50274..3297d411d8a7 100644 --- a/core/lib/dal/.sqlx/query-e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526.json +++ b/core/lib/dal/.sqlx/query-85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.recursion_scheduler_level_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -30,7 +30,7 @@ }, { "ordinal": 5, - "name": "recursion_scheduler_level_vk_hash", + "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } ], @@ -48,5 +48,5 @@ false ] }, - "hash": "e89e8cc58a2078157d06f3064ccad9773d45ef6d548f03d643007d3bc1072526" + "hash": "85576fdbb4bd6e3a6e43511c065a2e3eaf72dfe0fa96b335b76c9506cb1ebdcc" } diff --git a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json similarity index 64% rename from core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json rename to core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json index 32a9955cc270..ac10e8b1a8f0 100644 --- a/core/lib/dal/.sqlx/query-6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9.json +++ b/core/lib/dal/.sqlx/query-a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND recursion_scheduler_level_vk_hash = $2\n ORDER BY\n patch DESC\n ", + "query": "\n SELECT\n patch\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND snark_wrapper_vk_hash = $2\n ORDER BY\n patch DESC\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": 
"6f05b8ad720f9c1fae9292c16b5960b7bd48b48b63d9db071ef94c5fec4660c9" + "hash": "a23ae928d8351d3a6ed64d8db777e7ed268bb6c5f3465c7e64beaa226c066f2b" } diff --git a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json b/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json deleted file mode 100644 index 0fd16adc474d..000000000000 --- a/core/lib/dal/.sqlx/query-d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d93ebd47a227a6086a5eb963c7ed36e6c9d9e70dc52677c6b335b3ed4025db85" -} diff --git a/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json new file mode 100644 index 000000000000..fa47ccab50ab --- /dev/null +++ b/core/lib/dal/.sqlx/query-e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n protocol_patches\n WHERE\n minor = $1\n AND patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "e9d91aa8e30152a5b6a321cb94a298ed3fc5e6eb1c78c285bd20f6401771df25" +} diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql new file mode 100644 index 000000000000..daa108d4ff39 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE protocol_patches SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; +ALTER TABLE protocol_patches DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..730b3a50d8a0 --- /dev/null +++ b/core/lib/dal/migrations/20240905123059_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE protocol_patches ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE protocol_patches ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE protocol_patches SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- Default was only needed to migrate old rows, we don't want this field to be forgotten by accident after migration. +ALTER TABLE protocol_patches ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN protocol_patches.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future. 
Use snark_wrapper_vk_hash instead.'; diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs index c19fa560b67c..e53bf7b9d0a4 100644 --- a/core/lib/dal/src/models/storage_protocol_version.rs +++ b/core/lib/dal/src/models/storage_protocol_version.rs @@ -13,7 +13,7 @@ pub struct StorageProtocolVersion { pub minor: i32, pub patch: i32, pub timestamp: i64, - pub recursion_scheduler_level_vk_hash: Vec, + pub snark_wrapper_vk_hash: Vec, pub bootloader_code_hash: Vec, pub default_account_code_hash: Vec, } @@ -29,9 +29,7 @@ pub(crate) fn protocol_version_from_storage( }, timestamp: storage_version.timestamp as u64, l1_verifier_config: L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &storage_version.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&storage_version.snark_wrapper_vk_hash), }, base_system_contracts_hashes: BaseSystemContractsHashes { bootloader: H256::from_slice(&storage_version.bootloader_code_hash), diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs index 0d17044e6c51..8cb5094fd49e 100644 --- a/core/lib/dal/src/protocol_versions_dal.rs +++ b/core/lib/dal/src/protocol_versions_dal.rs @@ -71,16 +71,14 @@ impl ProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - protocol_patches (minor, patch, recursion_scheduler_level_vk_hash, created_at) + protocol_patches (minor, patch, snark_wrapper_vk_hash, created_at) VALUES ($1, $2, $3, NOW()) ON CONFLICT DO NOTHING "#, version.minor as i32, version.patch.0 as i32, - l1_verifier_config - .recursion_scheduler_level_vk_hash - .as_bytes(), + l1_verifier_config.snark_wrapper_vk_hash.as_bytes(), ) .instrument("save_protocol_version#patch") .with_arg("version", &version) @@ -235,7 +233,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, protocol_patches.patch, - protocol_patches.recursion_scheduler_level_vk_hash + protocol_patches.snark_wrapper_vk_hash FROM protocol_versions JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id @@ -268,7 +266,7 @@ impl ProtocolVersionsDal<'_, '_> { let row = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM protocol_patches WHERE @@ -282,16 +280,14 @@ impl ProtocolVersionsDal<'_, '_> { .await .unwrap()?; Some(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } pub async fn get_patch_versions_for_vk( &mut self, minor_version: ProtocolVersionId, - recursion_scheduler_level_vk_hash: H256, + snark_wrapper_vk_hash: H256, ) -> DalResult> { let rows = sqlx::query!( r#" @@ -301,12 +297,12 @@ impl ProtocolVersionsDal<'_, '_> { protocol_patches WHERE minor = $1 - AND recursion_scheduler_level_vk_hash = $2 + AND snark_wrapper_vk_hash = $2 ORDER BY patch DESC "#, minor_version as i32, - recursion_scheduler_level_vk_hash.as_bytes() + snark_wrapper_vk_hash.as_bytes() ) .instrument("get_patch_versions_for_vk") .fetch_all(self.storage) diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index 1eb83ae2f39e..bf30fd4cc339 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -72,7 +72,7 @@ impl FromEnv for GenesisConfig { l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: 
Some(network_config.network.chain_id()), l2_chain_id: network_config.zksync_network_id, - recursion_scheduler_level_vk_hash: contracts_config.snark_wrapper_vk_hash, + snark_wrapper_vk_hash: contracts_config.snark_wrapper_vk_hash, fee_account: state_keeper .fee_account_addr .context("Fee account required for genesis")?, diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 92f639aa224e..59896aa244d8 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -43,6 +43,13 @@ impl ProtoRepr for proto::Genesis { 0.into(), ) }; + // Check either of fields, use old name as a fallback. + let snark_wrapper_vk_hash = match (&prover.snark_wrapper_vk_hash, &prover.recursion_scheduler_level_vk_hash) { + (Some(x), _) => parse_h256(x).context("snark_wrapper_vk_hash")?, + (_, Some(x)) => parse_h256(x).context("recursion_scheduler_level_vk_hash")?, + _ => anyhow::bail!("Either snark_wrapper_vk_hash or recursion_scheduler_level_vk_hash should be presented"), + }; + Ok(Self::Type { protocol_version: Some(protocol_version), genesis_root_hash: Some( @@ -75,9 +82,7 @@ impl ProtoRepr for proto::Genesis { l2_chain_id: required(&self.l2_chain_id) .and_then(|x| L2ChainId::try_from(*x).map_err(|a| anyhow::anyhow!(a))) .context("l2_chain_id")?, - recursion_scheduler_level_vk_hash: required(&prover.recursion_scheduler_level_vk_hash) - .and_then(|x| parse_h256(x)) - .context("recursion_scheduler_level_vk_hash")?, + snark_wrapper_vk_hash, fee_account: required(&self.fee_account) .and_then(|x| parse_h160(x)) .context("fee_account")?, @@ -104,11 +109,9 @@ impl ProtoRepr for proto::Genesis { l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), prover: Some(proto::Prover { - recursion_scheduler_level_vk_hash: Some(format!( - "{:?}", - this.recursion_scheduler_level_vk_hash - )), + recursion_scheduler_level_vk_hash: None, // Deprecated field. 
                dummy_verifier: Some(this.dummy_verifier),
+                snark_wrapper_vk_hash: Some(format!("{:?}", this.snark_wrapper_vk_hash)),
             }),
             l1_batch_commit_data_generator_mode: Some(
                 proto::L1BatchCommitDataGeneratorMode::new(
diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto
index 6e679d865d92..08cbb954fcbc 100644
--- a/core/lib/protobuf_config/src/proto/config/genesis.proto
+++ b/core/lib/protobuf_config/src/proto/config/genesis.proto
@@ -8,8 +8,9 @@ enum L1BatchCommitDataGeneratorMode {
 }
 
 message Prover {
-  optional string recursion_scheduler_level_vk_hash = 1; // required; H256
+  optional string recursion_scheduler_level_vk_hash = 1; // optional and deprecated, used as alias for `snark_wrapper_vk_hash`; H256
   optional bool dummy_verifier = 5;
+  optional string snark_wrapper_vk_hash = 6; // optional (required if `recursion_scheduler_level_vk_hash` is not set); H256
   reserved 2, 3, 4;
   reserved "recursion_node_level_vk_hash", "recursion_leaf_level_vk_hash", "recursion_circuits_set_vks_hash";
 }
diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs
index bc9bd7667e82..1afb108a0536 100644
--- a/core/lib/types/src/protocol_upgrade.rs
+++ b/core/lib/types/src/protocol_upgrade.rs
@@ -282,14 +282,14 @@ impl ProtocolVersion {
     pub fn apply_upgrade(
         &self,
         upgrade: ProtocolUpgrade,
-        new_scheduler_vk_hash: Option<H256>,
+        new_snark_wrapper_vk_hash: Option<H256>,
     ) -> ProtocolVersion {
         ProtocolVersion {
             version: upgrade.version,
             timestamp: upgrade.timestamp,
             l1_verifier_config: L1VerifierConfig {
-                recursion_scheduler_level_vk_hash: new_scheduler_vk_hash
-                    .unwrap_or(self.l1_verifier_config.recursion_scheduler_level_vk_hash),
+                snark_wrapper_vk_hash: new_snark_wrapper_vk_hash
+                    .unwrap_or(self.l1_verifier_config.snark_wrapper_vk_hash),
             },
             base_system_contracts_hashes: BaseSystemContractsHashes {
                 bootloader: upgrade
diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs
index 604d38ef94ab..ca15352fd1ac 100644
--- a/core/node/api_server/src/web3/namespaces/en.rs
+++ b/core/node/api_server/src/web3/namespaces/en.rs
@@ -157,7 +157,7 @@ impl EnNamespace {
             l1_chain_id: self.state.api_config.l1_chain_id,
             sl_chain_id: Some(self.state.api_config.l1_chain_id.into()),
             l2_chain_id: self.state.api_config.l2_chain_id,
-            recursion_scheduler_level_vk_hash: verifier_config.recursion_scheduler_level_vk_hash,
+            snark_wrapper_vk_hash: verifier_config.snark_wrapper_vk_hash,
             fee_account,
             dummy_verifier: self.state.api_config.dummy_verifier,
             l1_batch_commit_data_generator_mode: self
diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs
index de6a6982088b..1e0bd315b9d9 100644
--- a/core/node/eth_sender/src/aggregator.rs
+++ b/core/node/eth_sender/src/aggregator.rs
@@ -333,16 +333,13 @@ impl Aggregator {
         // keys that correspond to one on L1.
let allowed_patch_versions = storage .protocol_versions_dal() - .get_patch_versions_for_vk( - minor_version, - l1_verifier_config.recursion_scheduler_level_vk_hash, - ) + .get_patch_versions_for_vk(minor_version, l1_verifier_config.snark_wrapper_vk_hash) .await .unwrap(); if allowed_patch_versions.is_empty() { tracing::warn!( "No patch version corresponds to the verification key on L1: {:?}", - l1_verifier_config.recursion_scheduler_level_vk_hash + l1_verifier_config.snark_wrapper_vk_hash ); return None; }; diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs index 7f304e2f72b7..6e9e71d74ea4 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -317,7 +317,7 @@ impl EthTxAggregator { } /// Loads current verifier config on L1 - async fn get_recursion_scheduler_level_vk_hash( + async fn get_snark_wrapper_vk_hash( &mut self, verifier_address: Address, ) -> Result { @@ -344,15 +344,15 @@ impl EthTxAggregator { })?; let contracts_are_pre_shared_bridge = protocol_version_id.is_pre_shared_bridge(); - let recursion_scheduler_level_vk_hash = self - .get_recursion_scheduler_level_vk_hash(verifier_address) + let snark_wrapper_vk_hash = self + .get_snark_wrapper_vk_hash(verifier_address) .await .map_err(|err| { tracing::error!("Failed to get VK hash from the Verifier {err:?}"); err })?; let l1_verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash, }; if let Some(agg_op) = self .aggregator diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 6713e5a4bcc2..1f30d314bb06 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -175,8 +175,7 @@ pub fn mock_genesis_config() -> GenesisConfig { l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), - recursion_scheduler_level_vk_hash: first_l1_verifier_config - .recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash: first_l1_verifier_config.snark_wrapper_vk_hash, fee_account: Default::default(), dummy_verifier: false, l1_batch_commit_data_generator_mode: Default::default(), @@ -190,7 +189,7 @@ pub async fn insert_genesis_batch( ) -> Result { let mut transaction = storage.start_transaction().await?; let verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash: genesis_params.config.recursion_scheduler_level_vk_hash, + snark_wrapper_vk_hash: genesis_params.config.snark_wrapper_vk_hash, }; create_genesis_l1_batch( @@ -297,10 +296,10 @@ pub async fn validate_genesis_params( .call(query_client) .await?; - if verification_key_hash != genesis_params.config().recursion_scheduler_level_vk_hash { + if verification_key_hash != genesis_params.config().snark_wrapper_vk_hash { return Err(anyhow::anyhow!( "Verification key hash mismatch: {verification_key_hash:?} on contract, {:?} in config", - genesis_params.config().recursion_scheduler_level_vk_hash + genesis_params.config().snark_wrapper_vk_hash )); } diff --git a/prover/crates/bin/prover_cli/src/commands/insert_version.rs b/prover/crates/bin/prover_cli/src/commands/insert_version.rs index 7f30719a713b..e89d2024e26f 100644 --- a/prover/crates/bin/prover_cli/src/commands/insert_version.rs +++ b/prover/crates/bin/prover_cli/src/commands/insert_version.rs @@ -35,7 +35,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { let protocol_version_patch = VersionPatch(args.patch); - let snark_wrapper = 
H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { + let snark_wrapper_vk_hash = H256::from_str(&args.snark_wrapper).unwrap_or_else(|_| { panic!("Invalid snark wrapper hash"); }); @@ -43,7 +43,7 @@ pub async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<()> { .save_prover_protocol_version( ProtocolSemanticVersion::new(protocol_version, protocol_version_patch), L1VerifierConfig { - recursion_scheduler_level_vk_hash: snark_wrapper, + snark_wrapper_vk_hash, }, ) .await; diff --git a/prover/crates/bin/prover_cli/src/commands/status/l1.rs b/prover/crates/bin/prover_cli/src/commands/status/l1.rs index 16cecc103828..4b403215e9c2 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/l1.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/l1.rs @@ -78,7 +78,7 @@ pub(crate) async fn run() -> anyhow::Result<()> { .await?; let node_l1_verifier_config = L1VerifierConfig { - recursion_scheduler_level_vk_hash: node_verification_key_hash, + snark_wrapper_vk_hash: node_verification_key_hash, }; let prover_connection_pool = ConnectionPool::::builder( @@ -149,7 +149,7 @@ fn pretty_print_l1_verifier_config( ) { print_hash_comparison( "Verifier key", - node_l1_verifier_config.recursion_scheduler_level_vk_hash, - db_l1_verifier_config.recursion_scheduler_level_vk_hash, + node_l1_verifier_config.snark_wrapper_vk_hash, + db_l1_verifier_config.snark_wrapper_vk_hash, ); } diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 2dca22c24579..06414e43be3c 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -81,8 +81,7 @@ async fn ensure_protocol_alignment( } }; let keystore = Keystore::new_with_setup_data_path(setup_data_path); - // `recursion_scheduler_level_vk_hash` actually stores `scheduler_vk_hash` for historical reasons. 
- let scheduler_vk_hash = vk_commitments_in_db.recursion_scheduler_level_vk_hash; + let scheduler_vk_hash = vk_commitments_in_db.snark_wrapper_vk_hash; keystore .verify_scheduler_vk_hash(scheduler_vk_hash) .with_context(|| diff --git a/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json new file mode 100644 index 000000000000..ff5b1727e26a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4" + ] + }, + "nullable": [ + false + ] + }, + "hash": "419206075cd24f96f00d65f3d138c05583e8415d2e1d8c503f640e77d282b0d5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json b/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json deleted file mode 100644 index 73cd88457cd1..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n WHERE\n id = $1\n AND protocol_version_patch = $2\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int4", - "Int4" - ] - }, - "nullable": [ - false - ] - }, - "hash": "4f26bae35dd959448d9728ef7321fc79400930dddc84e042c5a4dc8a2e8508a5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json b/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json deleted file mode 100644 index c985254f247e..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int4", - "Bytea", - "Int4" - ] - }, - "nullable": [] - }, - "hash": "5da848354a84b20ae3f0240f6a352e85f85202637916cfcf4b34c6780536f105" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json b/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json deleted file mode 100644 index c713af9a210d..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n l1_batch_number\n FROM\n prover_jobs_fri\n WHERE\n status <> 'skipped'\n AND status <> 'successful'\n AND 
aggregation_round = $1\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "61b2b858d4636809c21838635aa52aeb5f06c26f68d131dd242f6ed68816c513" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json new file mode 100644 index 000000000000..b5025c6ed18d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n snark_wrapper_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "snark_wrapper_vk_hash", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "6e58af820b4dd867cd794a04c97c23ff743fc01c92e28ed447a8e124062fa62c" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json new file mode 100644 index 000000000000..d8bd3223905c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch)\n VALUES\n ($1, $2, NOW(), $3)\n ON CONFLICT (id, protocol_version_patch) DO NOTHING\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int4", + "Bytea", + "Int4" + ] + }, + "nullable": [] + }, + "hash": "89a25708d0b0a15e1e56ee8fd69f5a15d2fc5ad3e5ce738a2f6ee3eecfc96736" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json b/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json deleted file mode 100644 index d699aae174c7..000000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n recursion_scheduler_level_vk_hash\n FROM\n prover_fri_protocol_versions\n ORDER BY\n id DESC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "recursion_scheduler_level_vk_hash", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - false - ] - }, - "hash": "ed0f22d3d2c8b0d9bd1b4360a0f7999388451886a3eb9b4481b55404b16b89ac" -} diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql new file mode 100644 index 000000000000..8d1681440769 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.down.sql @@ -0,0 +1,3 @@ +UPDATE prover_fri_protocol_versions SET recursion_scheduler_level_vk_hash = snark_wrapper_vk_hash WHERE recursion_scheduler_level_vk_hash = ''::bytea; 
+ALTER TABLE prover_fri_protocol_versions DROP COLUMN snark_wrapper_vk_hash; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash DROP DEFAULT; diff --git a/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql new file mode 100644 index 000000000000..98eb1ee791c2 --- /dev/null +++ b/prover/crates/lib/prover_dal/migrations/20240905124208_rename-recurision-scheduler-level-vk-hash.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE prover_fri_protocol_versions ADD COLUMN snark_wrapper_vk_hash BYTEA NOT NULL DEFAULT ''::bytea; +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN recursion_scheduler_level_vk_hash SET DEFAULT ''::bytea; +UPDATE prover_fri_protocol_versions SET snark_wrapper_vk_hash = recursion_scheduler_level_vk_hash; +-- Default was only needed to migrate old rows, we don't want this field to be forgotten by accident after migration. +ALTER TABLE prover_fri_protocol_versions ALTER COLUMN snark_wrapper_vk_hash DROP DEFAULT; + +-- Old column should be removed once the migration is on the mainnet. +COMMENT ON COLUMN prover_fri_protocol_versions.recursion_scheduler_level_vk_hash IS 'This column is deprecated and will be removed in the future. Use snark_wrapper_vk_hash instead.'; diff --git a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs index caf620882bc2..50df1046e67d 100644 --- a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs @@ -20,14 +20,14 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" INSERT INTO - prover_fri_protocol_versions (id, recursion_scheduler_level_vk_hash, created_at, protocol_version_patch) + prover_fri_protocol_versions (id, snark_wrapper_vk_hash, created_at, protocol_version_patch) VALUES ($1, $2, NOW(), $3) ON CONFLICT (id, protocol_version_patch) DO NOTHING "#, id.minor as i32, l1_verifier_config - .recursion_scheduler_level_vk_hash + .snark_wrapper_vk_hash .as_bytes(), id.patch.0 as i32 ) @@ -43,7 +43,7 @@ impl FriProtocolVersionsDal<'_, '_> { sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions WHERE @@ -57,9 +57,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await .unwrap() .map(|row| L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &row.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&row.snark_wrapper_vk_hash), }) } @@ -67,7 +65,7 @@ impl FriProtocolVersionsDal<'_, '_> { let result = sqlx::query!( r#" SELECT - recursion_scheduler_level_vk_hash + snark_wrapper_vk_hash FROM prover_fri_protocol_versions ORDER BY @@ -80,9 +78,7 @@ impl FriProtocolVersionsDal<'_, '_> { .await?; Ok(L1VerifierConfig { - recursion_scheduler_level_vk_hash: H256::from_slice( - &result.recursion_scheduler_level_vk_hash, - ), + snark_wrapper_vk_hash: H256::from_slice(&result.snark_wrapper_vk_hash), }) } From ac75d8734030e9a7afebaef01d77a4120d1523c3 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Fri, 6 Sep 2024 16:32:50 +0400 Subject: [PATCH 036/116] refactor(prover_keystore): Reduce number of Keystore constructors (#2819) - Remove `Keystore::default`, which implicitly used env config to find the setup path. - Remove dependency on `zksync_config` and `zksync_env_config` from keystore crate. 
- Reduce the number of constructors for `Keystore` - Pass `Keystore` instead of `setup_data_path` to the components to make access more explicit. *What next?* The following will be done separately to not overly expand the PR: - Remove implicit lookups from `Keystore` completely. Config-less lookup should be done by the caller, not the keystore. - Remove boilerplate code from `Keystore` (e.g. family of `load_x` and `load_y` methods -- this should be reworked as traits) - Cover code with tests. --- prover/Cargo.lock | 2 - .../proof_fri_compressor/src/compressor.rs | 13 ++--- .../bin/proof_fri_compressor/src/main.rs | 11 ++-- .../src/gpu_prover_job_processor.rs | 14 +++-- prover/crates/bin/prover_fri/src/main.rs | 15 ++++- .../prover_fri/src/prover_job_processor.rs | 16 ++++-- .../crates/bin/prover_fri/tests/basic_test.rs | 2 +- .../src/commitment_generator.rs | 2 +- .../src/main.rs | 10 ++-- .../src/tests.rs | 6 +- .../witness_generator/src/leaf_aggregation.rs | 19 +++---- .../crates/bin/witness_generator/src/main.rs | 25 ++++----- .../witness_generator/src/node_aggregation.rs | 11 ++-- .../witness_generator/src/recursion_tip.rs | 11 ++-- .../bin/witness_generator/src/scheduler.rs | 11 ++-- .../bin/witness_generator/tests/basic_test.rs | 24 ++++---- .../witness_vector_generator/src/generator.rs | 14 ++--- .../bin/witness_vector_generator/src/main.rs | 6 +- .../tests/basic_test.rs | 3 +- prover/crates/lib/keystore/Cargo.toml | 2 - prover/crates/lib/keystore/src/keystore.rs | 55 +++++++------------ prover/crates/lib/keystore/src/utils.rs | 2 +- 22 files changed, 127 insertions(+), 147 deletions(-) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index bc7d7e3693ad..24e8638876bf 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8236,8 +8236,6 @@ dependencies = [ "tracing", "zkevm_test_harness", "zksync_basic_types", - "zksync_config", - "zksync_env_config", "zksync_prover_fri_types", "zksync_utils", ] diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index c7747b2e45bd..077347bce9be 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -35,7 +35,7 @@ pub struct ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl ProofCompressor { @@ -45,7 +45,7 @@ impl ProofCompressor { compression_mode: u8, max_attempts: u32, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { blob_store, @@ -53,7 +53,7 @@ impl ProofCompressor { compression_mode, max_attempts, protocol_version, - setup_data_path, + keystore, } } @@ -62,9 +62,8 @@ impl ProofCompressor { l1_batch: L1BatchNumber, proof: ZkSyncRecursionLayerProof, _compression_mode: u8, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let scheduler_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, @@ -178,9 +177,9 @@ impl JobProcessor for ProofCompressor { ) -> JoinHandle> { let compression_mode = self.compression_mode; let block_number = *job_id; - let setup_data_path = self.setup_data_path.clone(); + let keystore = self.keystore.clone(); tokio::task::spawn_blocking(move || { - Self::compress_proof(block_number, job, compression_mode, setup_data_path) + 
Self::compress_proof(block_number, job, compression_mode, keystore) }) } diff --git a/prover/crates/bin/proof_fri_compressor/src/main.rs b/prover/crates/bin/proof_fri_compressor/src/main.rs index e2086b228b69..f06b4b8f89e5 100644 --- a/prover/crates/bin/proof_fri_compressor/src/main.rs +++ b/prover/crates/bin/proof_fri_compressor/src/main.rs @@ -11,6 +11,7 @@ use zksync_env_config::object_store::ProverObjectStoreConfig; use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; @@ -70,16 +71,18 @@ async fn main() -> anyhow::Result<()> { let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; + let prover_config = general_config + .prover_config + .expect("ProverConfig doesn't exist"); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let proof_compressor = ProofCompressor::new( blob_store, pool, config.compression_mode, config.max_attempts, protocol_version, - general_config - .prover_config - .expect("ProverConfig doesn't exist") - .setup_data_path, + keystore, ); let (stop_sender, stop_receiver) = watch::channel(false); diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 0835c8ff4cbf..240251df15bf 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -55,6 +55,7 @@ pub mod gpu_prover { #[allow(dead_code)] pub struct Prover { + keystore: Keystore, blob_store: Arc, public_blob_store: Option>, config: Arc, @@ -73,6 +74,7 @@ pub mod gpu_prover { impl Prover { #[allow(dead_code)] pub fn new( + keystore: Keystore, blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, @@ -93,6 +95,7 @@ pub mod gpu_prover { None => ProverContext::create().expect("failed initializing gpu prover context"), }; Prover { + keystore, blob_store, public_blob_store, config: Arc::new(config), @@ -120,9 +123,8 @@ pub mod gpu_prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = - Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); - let artifact: GoldilocksGpuProverSetupData = keystore + let artifact: GoldilocksGpuProverSetupData = self + .keystore .load_gpu_setup_data_for_circuit_type(key.clone()) .context("load_gpu_setup_data_for_circuit_type()")?; @@ -339,7 +341,10 @@ pub mod gpu_prover { } } - pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result { + pub fn load_setup_data_cache( + keystore: &Keystore, + config: &FriProverConfig, + ) -> anyhow::Result { Ok(match config.setup_load_mode { zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { @@ -359,7 +364,6 @@ pub mod gpu_prover { &config.specialized_group_id, prover_setup_metadata_list ); - let keystore = Keystore::new_with_setup_data_path(config.setup_data_path.clone()); for prover_setup_metadata in prover_setup_metadata_list { let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); let setup_data = keystore diff --git a/prover/crates/bin/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs index 
b93eb9c03958..8191653efec6 100644 --- a/prover/crates/bin/prover_fri/src/main.rs +++ b/prover/crates/bin/prover_fri/src/main.rs @@ -182,6 +182,8 @@ async fn get_prover_tasks( _max_allocation: Option, _init_notifier: Arc, ) -> anyhow::Result>>> { + use zksync_prover_keystore::keystore::Keystore; + use crate::prover_job_processor::{load_setup_data_cache, Prover}; let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; @@ -191,12 +193,15 @@ async fn get_prover_tasks( protocol_version ); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let setup_load_mode = - load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?; + load_setup_data_cache(&keystore, &prover_config).context("load_setup_data_cache()")?; let prover = Prover::new( store_factory.create_store().await?, public_blob_store, prover_config, + keystore, pool, setup_load_mode, circuit_ids_for_round_to_be_proven, @@ -222,9 +227,12 @@ async fn get_prover_tasks( use socket_listener::gpu_socket_listener; use tokio::sync::Mutex; use zksync_prover_fri_types::queue::FixedSizeQueue; + use zksync_prover_keystore::keystore::Keystore; - let setup_load_mode = - gpu_prover::load_setup_data_cache(&prover_config).context("load_setup_data_cache()")?; + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); + let setup_load_mode = gpu_prover::load_setup_data_cache(&keystore, &prover_config) + .context("load_setup_data_cache()")?; let witness_vector_queue = FixedSizeQueue::new(prover_config.queue_capacity); let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue)); let consumer = shared_witness_vector_queue.clone(); @@ -238,6 +246,7 @@ async fn get_prover_tasks( let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let prover = gpu_prover::Prover::new( + keystore, store_factory.create_store().await?, public_blob_store, prover_config.clone(), diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index 4de11a68b534..bbfb1d5a8322 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -43,6 +43,7 @@ pub struct Prover { blob_store: Arc, public_blob_store: Option>, config: Arc, + keystore: Keystore, prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, // Only pick jobs for the configured circuit id and aggregation rounds. 
@@ -52,11 +53,12 @@ pub struct Prover { } impl Prover { - #[allow(dead_code)] + #[allow(dead_code, clippy::too_many_arguments)] pub fn new( blob_store: Arc, public_blob_store: Option>, config: FriProverConfig, + keystore: Keystore, prover_connection_pool: ConnectionPool, setup_load_mode: SetupLoadMode, circuit_ids_for_round_to_be_proven: Vec, @@ -66,6 +68,7 @@ impl Prover { blob_store, public_blob_store, config: Arc::new(config), + keystore, prover_connection_pool, setup_load_mode, circuit_ids_for_round_to_be_proven, @@ -85,9 +88,8 @@ impl Prover { .clone(), SetupLoadMode::FromDisk => { let started_at = Instant::now(); - let keystore = - Keystore::new_with_setup_data_path(self.config.setup_data_path.clone()); - let artifact: GoldilocksProverSetupData = keystore + let artifact: GoldilocksProverSetupData = self + .keystore .load_cpu_setup_data_for_circuit_type(key.clone()) .context("get_cpu_setup_data_for_circuit_type()")?; METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] @@ -279,7 +281,10 @@ impl JobProcessor for Prover { } #[allow(dead_code)] -pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result { +pub fn load_setup_data_cache( + keystore: &Keystore, + config: &FriProverConfig, +) -> anyhow::Result { Ok(match config.setup_load_mode { zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { @@ -299,7 +304,6 @@ pub fn load_setup_data_cache(config: &FriProverConfig) -> anyhow::Result base.clone(), _ => anyhow::bail!("Expected base layer circuit"), }; - let keystore = Keystore::default(); + let keystore = Keystore::locate(); let circuit_setup_data = generate_setup_data_common( &keystore, ProverServiceDataKey::new_basic(circuit.numeric_circuit_type()), diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs index ec4bbb77ba6e..f92be40fd7cc 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/commitment_generator.rs @@ -34,6 +34,6 @@ mod test { #[test] fn test_read_and_update_contract_toml() { - read_and_update_contract_toml(&Keystore::default(), true).unwrap(); + read_and_update_contract_toml(&Keystore::locate(), true).unwrap(); } } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs index aa359720ab44..59d989037c4b 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/main.rs @@ -1,7 +1,7 @@ //! Tool to generate different types of keys used by the proving system. //! //! It can generate verification keys, setup keys, and also commitments. 
-use std::collections::HashMap; +use std::{collections::HashMap, path::PathBuf}; use anyhow::Context as _; use clap::{Parser, Subcommand}; @@ -196,14 +196,14 @@ fn print_stats(digests: HashMap) -> anyhow::Result<()> { Ok(()) } -fn keystore_from_optional_path(path: Option, setup_path: Option) -> Keystore { +fn keystore_from_optional_path(path: Option, setup_data_path: Option) -> Keystore { if let Some(path) = path { - return Keystore::new_with_optional_setup_path(path.into(), setup_path); + return Keystore::new(path.into()).with_setup_path(setup_data_path.map(PathBuf::from)); } - if setup_path.is_some() { + if setup_data_path.is_some() { panic!("--setup_path must not be set when --path is not set"); } - Keystore::default() + Keystore::locate() } fn generate_setup_keys( diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs index d704f4e8fb60..0a9548197fd7 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/tests.rs @@ -36,21 +36,21 @@ fn all_possible_prover_service_data_key() -> impl Strategy, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl LeafAggregationWitnessGenerator { @@ -81,14 +81,14 @@ impl LeafAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -134,13 +134,9 @@ impl JobProcessor for LeafAggregationWitnessGenerator { tracing::info!("Processing leaf aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_leaf_aggregation_job( - metadata, - &*self.object_store, - self.setup_data_path.clone(), - ) - .await - .context("prepare_leaf_aggregation_job()")?, + prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_leaf_aggregation_job()")?, ))) } @@ -226,7 +222,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); let closed_form_input = get_artifacts(&metadata, object_store).await; @@ -235,7 +231,6 @@ pub async fn prepare_leaf_aggregation_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let base_vk = keystore .load_base_layer_verification_key(metadata.circuit_id) .context("get_base_layer_vk_for_circuit_type()")?; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 06414e43be3c..9d75d8ddc6f1 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -61,7 +61,7 @@ struct Opt { async fn ensure_protocol_alignment( prover_pool: &ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: &Keystore, ) -> anyhow::Result<()> { tracing::info!("Verifying protocol alignment for {:?}", protocol_version); let vk_commitments_in_db = match prover_pool @@ -80,7 +80,6 @@ async fn ensure_protocol_alignment( ); } }; - let keystore = 
Keystore::new_with_setup_data_path(setup_data_path); let scheduler_vk_hash = vk_commitments_in_db.snark_wrapper_vk_hash; keystore .verify_scheduler_vk_hash(scheduler_vk_hash) @@ -118,6 +117,8 @@ async fn main() -> anyhow::Result<()> { .witness_generator_config .context("witness generator config")? .clone(); + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); let prometheus_config = general_config.prometheus_config.clone(); @@ -139,13 +140,9 @@ async fn main() -> anyhow::Result<()> { let (stop_sender, stop_receiver) = watch::channel(false); let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; - ensure_protocol_alignment( - &prover_connection_pool, - protocol_version, - prover_config.setup_data_path.clone(), - ) - .await - .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err)); + ensure_protocol_alignment(&prover_connection_pool, protocol_version, &keystore) + .await + .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err)); let rounds = match (opt.round, opt.all_rounds) { (Some(round), false) => vec![round], @@ -186,8 +183,6 @@ async fn main() -> anyhow::Result<()> { let mut tasks = Vec::new(); tasks.push(tokio::spawn(prometheus_task)); - let setup_data_path = prover_config.setup_data_path.clone(); - for round in rounds { tracing::info!( "initializing the {:?} witness generator, batch size: {:?} with protocol_version: {:?}", @@ -226,7 +221,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -236,7 +231,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -246,7 +241,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } @@ -256,7 +251,7 @@ async fn main() -> anyhow::Result<()> { store_factory.create_store().await?, prover_connection_pool.clone(), protocol_version, - setup_data_path.clone(), + keystore.clone(), ); generator.run(stop_receiver.clone(), opt.batch_size) } diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation.rs index 87835d79e13f..72bdebde572a 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation.rs @@ -70,7 +70,7 @@ pub struct NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl NodeAggregationWitnessGenerator { @@ -79,14 +79,14 @@ impl NodeAggregationWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -244,7 +244,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { tracing::info!("Processing node aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_job(metadata, 
&*self.object_store, self.setup_data_path.clone()) + prepare_job(metadata, &*self.object_store, self.keystore.clone()) .await .context("prepare_job()")?, ))) @@ -329,7 +329,7 @@ impl JobProcessor for NodeAggregationWitnessGenerator { pub async fn prepare_job( metadata: NodeAggregationJobMetadata, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); let artifacts = get_artifacts(&metadata, object_store).await; @@ -338,7 +338,6 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let leaf_vk = keystore .load_recursive_layer_verification_key(metadata.circuit_id) .context("get_recursive_layer_vk_for_circuit_type")?; diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip.rs index c04959b98952..5e97631babb9 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip.rs @@ -75,7 +75,7 @@ pub struct RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl RecursionTipWitnessGenerator { @@ -84,14 +84,14 @@ impl RecursionTipWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -175,7 +175,7 @@ impl JobProcessor for RecursionTipWitnessGenerator { l1_batch_number, final_node_proof_job_ids, &*self.object_store, - self.setup_data_path.clone(), + self.keystore.clone(), ) .await .context("prepare_job()")?, @@ -288,7 +288,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, final_node_proof_job_ids: Vec<(u8, u32)>, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); let recursion_tip_proofs = @@ -296,7 +296,6 @@ pub async fn prepare_job( WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler.rs index 6e3461150fe2..c6e43582bbdb 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler.rs @@ -57,7 +57,7 @@ pub struct SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, } impl SchedulerWitnessGenerator { @@ -66,14 +66,14 @@ impl SchedulerWitnessGenerator { object_store: Arc, prover_connection_pool: ConnectionPool, protocol_version: ProtocolSemanticVersion, - setup_data_path: String, + keystore: Keystore, ) -> Self { Self { config, object_store, prover_connection_pool, protocol_version, - setup_data_path, + keystore, } } @@ -154,7 +154,7 @@ impl JobProcessor for SchedulerWitnessGenerator { l1_batch_number, recursion_tip_job_id, &*self.object_store, - self.setup_data_path.clone(), + 
self.keystore.clone(), ) .await .context("prepare_job()")?, @@ -266,7 +266,7 @@ pub async fn prepare_job( l1_batch_number: L1BatchNumber, recursion_tip_job_id: u32, object_store: &dyn ObjectStore, - setup_data_path: String, + keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); let wrapper = object_store.get(recursion_tip_job_id).await?; @@ -280,7 +280,6 @@ pub async fn prepare_job( .observe(started_at.elapsed()); let started_at = Instant::now(); - let keystore = Keystore::new_with_setup_data_path(setup_data_path); let node_vk = keystore .load_recursive_layer_verification_key( ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index b034ab57d82c..3323e3c681e4 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -8,6 +8,7 @@ use zksync_prover_fri_types::{ CircuitWrapper, }; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_prover_keystore::keystore::Keystore; use zksync_types::{ basic_fri_types::AggregationRound, prover_dal::{LeafAggregationJobMetadata, NodeAggregationJobMetadata}, @@ -50,13 +51,10 @@ async fn test_leaf_witness_gen() { .await .unwrap(); - let job = prepare_leaf_aggregation_job( - leaf_aggregation_job_metadata, - &*object_store, - "crates/bin/vk_setup_data_generator/data".to_string(), - ) - .await - .unwrap(); + let keystore = Keystore::locate(); + let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); let artifacts = LeafAggregationWitnessGenerator::process_job_impl( job, @@ -143,13 +141,11 @@ async fn test_node_witness_gen() { prover_job_ids_for_proofs: vec![5211320], }; - let job = node_aggregation::prepare_job( - node_aggregation_job_metadata, - &*object_store, - "crates/bin/vk_setup_data_generator/data".to_string(), - ) - .await - .unwrap(); + let keystore = Keystore::locate(); + let job = + node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); let artifacts = NodeAggregationWitnessGenerator::process_job_impl( job, diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index f482637c1778..6695905c07e3 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -34,7 +34,7 @@ pub struct WitnessVectorGenerator { config: FriWitnessVectorGeneratorConfig, protocol_version: ProtocolSemanticVersion, max_attempts: u32, - setup_data_path: Option, + keystore: Keystore, } impl WitnessVectorGenerator { @@ -47,7 +47,7 @@ impl WitnessVectorGenerator { config: FriWitnessVectorGeneratorConfig, protocol_version: ProtocolSemanticVersion, max_attempts: u32, - setup_data_path: Option, + keystore: Keystore, ) -> Self { Self { object_store, @@ -57,7 +57,7 @@ impl WitnessVectorGenerator { config, protocol_version, max_attempts, - setup_data_path, + keystore, } } @@ -127,16 +127,10 @@ impl JobProcessor for WitnessVectorGenerator { job: ProverJob, _started_at: Instant, ) -> JoinHandle> { - let setup_data_path = self.setup_data_path.clone(); - + let keystore = self.keystore.clone(); tokio::task::spawn_blocking(move || { let block_number = job.block_number; let _span = tracing::info_span!("witness_vector_generator", %block_number).entered(); - let 
keystore = if let Some(setup_data_path) = setup_data_path { - Keystore::new_with_setup_data_path(setup_data_path) - } else { - Keystore::default() - }; Self::generate_witness_vector(job, &keystore) }) } diff --git a/prover/crates/bin/witness_vector_generator/src/main.rs b/prover/crates/bin/witness_vector_generator/src/main.rs index 1d3113ebf1aa..17ac3bd6fc9f 100644 --- a/prover/crates/bin/witness_vector_generator/src/main.rs +++ b/prover/crates/bin/witness_vector_generator/src/main.rs @@ -12,6 +12,7 @@ use zksync_object_store::ObjectStoreFactory; use zksync_prover_dal::ConnectionPool; use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; use zksync_prover_fri_utils::{get_all_circuit_id_round_tuples_for, region_fetcher::RegionFetcher}; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::JobProcessor; use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; @@ -87,6 +88,9 @@ async fn main() -> anyhow::Result<()> { .await .context("get_zone()")?; + let keystore = + Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); + let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION; let (stop_sender, stop_receiver) = watch::channel(false); @@ -120,7 +124,7 @@ async fn main() -> anyhow::Result<()> { config.clone(), protocol_version, prover_config.max_attempts, - Some(prover_config.setup_data_path.clone()), + keystore.clone(), ); tasks.push(tokio::spawn( witness_vector_generator.run(stop_receiver.clone(), opt.n_iterations), diff --git a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs index dd1ef8404198..bcf01ddc4061 100644 --- a/prover/crates/bin/witness_vector_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_vector_generator/tests/basic_test.rs @@ -22,8 +22,7 @@ fn test_generate_witness_vector() { circuit_wrapper, setup_data_key: key, }; - let vector = - WitnessVectorGenerator::generate_witness_vector(job, &Keystore::default()).unwrap(); + let vector = WitnessVectorGenerator::generate_witness_vector(job, &Keystore::locate()).unwrap(); assert!(!vector.witness_vector.all_values.is_empty()); assert!(!vector.witness_vector.multiplicities.is_empty()); assert!(!vector.witness_vector.public_inputs_locations.is_empty()); diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml index 423df468d0b6..617030754f8b 100644 --- a/prover/crates/lib/keystore/Cargo.toml +++ b/prover/crates/lib/keystore/Cargo.toml @@ -17,8 +17,6 @@ zksync_prover_fri_types.workspace = true zkevm_test_harness.workspace = true circuit_definitions = { workspace = true, features = ["log_tracing"] } shivini = { workspace = true, optional = true } -zksync_config.workspace = true -zksync_env_config.workspace = true anyhow.workspace = true tracing.workspace = true diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 8fc2694608f9..ff14387bfda7 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -17,8 +17,6 @@ use circuit_definitions::{ use serde::{Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; use zksync_basic_types::basic_fri_types::AggregationRound; -use zksync_config::configs::FriProverConfig; -use zksync_env_config::FromEnv; use zksync_prover_fri_types::ProverServiceDataKey; #[cfg(feature = "gpu")] @@ -36,12 +34,12 @@ 
pub enum ProverServiceDataType { /// There are 2 types: /// - small verification, finalization keys (used only during verification) /// - large setup keys, used during proving. -#[derive(Clone)] +#[derive(Clone, Debug)] pub struct Keystore { /// Directory to store all the small keys. basedir: PathBuf, /// Directory to store large setup keys. - setup_data_path: Option, + setup_data_path: PathBuf, } fn get_base_path() -> PathBuf { @@ -69,41 +67,32 @@ fn get_base_path() -> PathBuf { components.as_path().join("prover/data/keys") } -impl Default for Keystore { - fn default() -> Self { - Self { - basedir: get_base_path(), - setup_data_path: Some( - FriProverConfig::from_env() - .expect("FriProverConfig::from_env()") - .setup_data_path, - ), - } - } -} - impl Keystore { /// Base-dir is the location of smaller keys (like verification keys and finalization hints). /// Setup data path is used for the large setup keys. - pub fn new(basedir: PathBuf, setup_data_path: String) -> Self { + pub fn new(basedir: PathBuf) -> Self { Keystore { - basedir, - setup_data_path: Some(setup_data_path), + basedir: basedir.clone(), + setup_data_path: basedir, } } - pub fn new_with_optional_setup_path(basedir: PathBuf, setup_data_path: Option) -> Self { - Keystore { - basedir, - setup_data_path, + /// Uses automatic detection of the base path, and assumes that setup keys + /// are stored in the same directory. + pub fn locate() -> Self { + let base_path = get_base_path(); + Self { + basedir: base_path.clone(), + setup_data_path: base_path, } } - pub fn new_with_setup_data_path(setup_data_path: String) -> Self { - Keystore { - basedir: get_base_path(), - setup_data_path: Some(setup_data_path), + /// Will override the setup path, if present. + pub fn with_setup_path(mut self, setup_data_path: Option) -> Self { + if let Some(setup_data_path) = setup_data_path { + self.setup_data_path = setup_data_path; } + self } pub fn get_base_path(&self) -> &PathBuf { @@ -120,13 +109,9 @@ impl Keystore { ProverServiceDataType::VerificationKey => { self.basedir.join(format!("verification_{}_key.json", name)) } - ProverServiceDataType::SetupData => PathBuf::from(format!( - "{}/setup_{}_data.bin", - self.setup_data_path - .as_ref() - .expect("Setup data path not set"), - name - )), + ProverServiceDataType::SetupData => self + .setup_data_path + .join(format!("setup_{}_data.bin", name)), ProverServiceDataType::FinalizationHints => self .basedir .join(format!("finalization_hints_{}.bin", name)), diff --git a/prover/crates/lib/keystore/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs index b74f716dac53..5cebf7aef77a 100644 --- a/prover/crates/lib/keystore/src/utils.rs +++ b/prover/crates/lib/keystore/src/utils.rs @@ -137,7 +137,7 @@ mod tests { for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() { if entry.metadata().unwrap().is_dir() { let basepath = path_to_input.join(entry.file_name()); - let keystore = Keystore::new_with_optional_setup_path(basepath.clone(), None); + let keystore = Keystore::new(basepath.clone()); let expected = H256::from_str(&keystore.load_commitments().unwrap().snark_wrapper).unwrap(); From df8641a912a8d480ceecff58b0bfaef05e04f0c8 Mon Sep 17 00:00:00 2001 From: Patrick Date: Fri, 6 Sep 2024 14:59:22 +0200 Subject: [PATCH 037/116] fix(tee-prover): fix deserialization of `std::time::Duration` in `envy` config (#2817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR fixes the deserialization issue of `std::time::Duration` in the `envy` 
configuration. Relevant logs from the `stage` environment showcasing the issue: https://grafana.matterlabs.dev/goto/IC-9k4eIR?orgId=1 Error message from the above logs: ``` Error: missing value for field initial_retry_backoff ``` The root cause of the problem supposedly boils down to the mismatch between the expected format of `TEE_PROVER_INITIAL_RETRY_BACKOFF` and the actual format. We export it as follows: ``` export TEE_PROVER_INITIAL_RETRY_BACKOFF=1 ``` which is not supported as explained here: https://github.com/serde-rs/serde/issues/339#issuecomment-539453327 ## Why ❔ To fix the bug. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/bin/zksync_tee_prover/src/config.rs | 18 ++++++++++++++---- core/bin/zksync_tee_prover/src/tee_prover.rs | 6 +++--- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/core/bin/zksync_tee_prover/src/config.rs b/core/bin/zksync_tee_prover/src/config.rs index 5b009e33f25e..1c2eb229d616 100644 --- a/core/bin/zksync_tee_prover/src/config.rs +++ b/core/bin/zksync_tee_prover/src/config.rs @@ -22,11 +22,21 @@ pub(crate) struct TeeProverConfig { pub max_retries: usize, /// Initial back-off interval when retrying recovery on a retriable error. Each subsequent retry interval /// will be multiplied by [`Self.retry_backoff_multiplier`]. - pub initial_retry_backoff: Duration, + pub initial_retry_backoff_sec: u64, /// Multiplier for the back-off interval when retrying recovery on a retriable error. pub retry_backoff_multiplier: f32, /// Maximum back-off interval when retrying recovery on a retriable error. 
- pub max_backoff: Duration, + pub max_backoff_sec: u64, +} + +impl TeeProverConfig { + pub fn initial_retry_backoff(&self) -> Duration { + Duration::from_secs(self.initial_retry_backoff_sec) + } + + pub fn max_backoff(&self) -> Duration { + Duration::from_secs(self.max_backoff_sec) + } } impl FromEnv for TeeProverConfig { @@ -39,9 +49,9 @@ impl FromEnv for TeeProverConfig { /// export TEE_PROVER_TEE_TYPE="sgx" /// export TEE_PROVER_API_URL="http://127.0.0.1:3320" /// export TEE_PROVER_MAX_RETRIES=10 - /// export TEE_PROVER_INITIAL_RETRY_BACKOFF=1 + /// export TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC=1 /// export TEE_PROVER_RETRY_BACKOFF_MULTIPLIER=2.0 - /// export TEE_PROVER_MAX_BACKOFF=128 + /// export TEE_PROVER_MAX_BACKOFF_SEC=128 /// ``` fn from_env() -> anyhow::Result { let config: Self = envy::prefixed("TEE_PROVER_").from_env()?; diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 3d227118e57f..1511f0c88e3d 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -129,7 +129,7 @@ impl Task for TeeProver { .await?; let mut retries = 1; - let mut backoff = config.initial_retry_backoff; + let mut backoff = config.initial_retry_backoff(); let mut observer = METRICS.job_waiting_time.start(); loop { @@ -141,7 +141,7 @@ impl Task for TeeProver { let need_to_sleep = match result { Ok(batch_number) => { retries = 1; - backoff = config.initial_retry_backoff; + backoff = config.initial_retry_backoff(); if let Some(batch_number) = batch_number { observer.observe(); observer = METRICS.job_waiting_time.start(); @@ -162,7 +162,7 @@ impl Task for TeeProver { retries += 1; backoff = std::cmp::min( backoff.mul_f32(config.retry_backoff_multiplier), - config.max_backoff, + config.max_backoff(), ); true } From 4d8862b76a55ac78edd481694fefd2107736ffd9 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 6 Sep 2024 16:20:37 +0300 Subject: [PATCH 038/116] fix(state-keeper): Restore processed tx metrics in state keeper (#2815) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Restores processed transaction metrics that were accidentally removed in https://github.com/matter-labs/zksync-era/pull/2702. ## Why ❔ These metrics are used in dashboards etc. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Code has been formatted via `zk fmt` and `zk lint`. 
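For reviewers who don't have the metrics definitions in their head, here is a minimal, self-contained sketch of the accounting this patch restores. It intentionally uses plain atomics rather than the actual `vise`-based `APP_METRICS` families from `zksync_shared_metrics`, so all names below are illustrative only:

```rust
use std::sync::atomic::{AtomicU64, Ordering};

// Illustrative stand-ins for `APP_METRICS.processed_txs[&TxStage::StateKeeper]`
// and `APP_METRICS.processed_l1_txs[&TxStage::StateKeeper]`.
static PROCESSED_TXS: AtomicU64 = AtomicU64::new(0);
static PROCESSED_L1_TXS: AtomicU64 = AtomicU64::new(0);

fn record_executed_tx(is_l1: bool) {
    // The restored behavior: every executed transaction bumps the per-stage
    // counter, and L1-originated transactions additionally bump the L1 one.
    PROCESSED_TXS.fetch_add(1, Ordering::Relaxed);
    PROCESSED_L1_TXS.fetch_add(u64::from(is_l1), Ordering::Relaxed);
}
```

Note that the increments happen in both execution paths of the keeper (batch replay and newly processed transactions), matching the diff below.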
--- core/node/state_keeper/src/keeper.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index 02f7f92e070a..d36ceec7d70c 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -14,6 +14,7 @@ use zksync_multivm::{ }, utils::StorageWritesDeduplicator, }; +use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, @@ -463,6 +464,9 @@ impl ZkSyncStateKeeper { .with_context(|| format!("failed re-executing transaction {:?}", tx.hash()))?; let result = TxExecutionResult::new(result, &tx); + APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); + APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); + let TxExecutionResult::Success { tx_result, tx_metrics, @@ -742,6 +746,9 @@ impl ZkSyncStateKeeper { let exec_result = TxExecutionResult::new(exec_result, &tx); latency.observe(); + APP_METRICS.processed_txs[&TxStage::StateKeeper].inc(); + APP_METRICS.processed_l1_txs[&TxStage::StateKeeper].inc_by(tx.is_l1().into()); + let latency = KEEPER_METRICS.determine_seal_resolution.start(); // All of `TxExecutionResult::BootloaderOutOfGasForTx`, // `Halt::NotEnoughGasProvided` correspond to out-of-gas errors but of different nature. From b8d4424c0be72a22df0c2a828785442822825b21 Mon Sep 17 00:00:00 2001 From: Patrick Date: Sat, 7 Sep 2024 09:38:47 +0200 Subject: [PATCH 039/116] fix(tee-prover): passthrough env vars to the SGX enclave (#2824) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Passthrough env vars to the SGX enclave. Relevant logs showcasing the issue: https://grafana.matterlabs.dev/goto/1iFHMIeIg?orgId=1 ## Why ❔ To fix the bug. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
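To make the contract concrete: the variable names must now exactly match the `_SEC` config fields renamed in #2817, and only variables whitelisted with `passthrough = true` in the generated Gramine manifest become visible inside the enclave. A quick sanity check could look like this (the container name is hypothetical; the exported values mirror the doc comment in `zksync_tee_prover/src/config.rs`):

```bash
export TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC=1
export TEE_PROVER_RETRY_BACKOFF_MULTIPLIER=2.0
export TEE_PROVER_MAX_BACKOFF_SEC=128

# Only variables marked `passthrough = true` in the manifest should show up
# in the enclave's environment.
docker exec tee-prover sh -c 'env | grep ^TEE_PROVER_'
```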
--- etc/nix/container-tee_prover.nix | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/etc/nix/container-tee_prover.nix b/etc/nix/container-tee_prover.nix index 7c0d8d164e34..cb8ebfb51549 100644 --- a/etc/nix/container-tee_prover.nix +++ b/etc/nix/container-tee_prover.nix @@ -33,9 +33,9 @@ nixsgxLib.mkSGXContainer { env = { TEE_PROVER_API_URL.passthrough = true; TEE_PROVER_MAX_RETRIES.passthrough = true; - TEE_PROVER_INITIAL_RETRY_BACKOFF_SECONDS.passthrough = true; + TEE_PROVER_INITIAL_RETRY_BACKOFF_SEC.passthrough = true; TEE_PROVER_RETRY_BACKOFF_MULTIPLIER.passthrough = true; - TEE_PROVER_MAX_BACKOFF_SECONDS.passthrough = true; + TEE_PROVER_MAX_BACKOFF_SEC.passthrough = true; API_PROMETHEUS_LISTENER_PORT.passthrough = true; API_PROMETHEUS_PUSHGATEWAY_URL.passthrough = true; API_PROMETHEUS_PUSH_INTERVAL_MS.passthrough = true; From 0a9e0961758e0b6274f1ac68d0b50ce5344ef14a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Mon, 9 Sep 2024 07:50:29 -0300 Subject: [PATCH 040/116] feat(zk_toolbox): Add setup keys step to prover init (#2811) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add setup keys step to prover init --- .../commands/prover/args/compressor_keys.rs | 22 ++++ .../src/commands/prover/args/init.rs | 121 +++++++++++------- .../commands/prover/args/init_bellman_cuda.rs | 6 +- .../src/commands/prover/args/mod.rs | 1 + .../src/commands/prover/args/setup_keys.rs | 8 +- .../src/commands/prover/compressor_keys.rs | 73 +++++++++++ .../zk_inception/src/commands/prover/init.rs | 84 +++--------- .../src/commands/prover/init_bellman_cuda.rs | 2 +- .../zk_inception/src/commands/prover/mod.rs | 10 +- .../crates/zk_inception/src/messages.rs | 13 +- 10 files changed, 214 insertions(+), 126 deletions(-) create mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs new file mode 100644 index 000000000000..095dccf00b38 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs @@ -0,0 +1,22 @@ +use clap::Parser; +use common::Prompt; + +use crate::messages::MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT; + +#[derive(Debug, Clone, Parser, Default)] +pub struct CompressorKeysArgs { + #[clap(long)] + pub path: Option, +} + +impl CompressorKeysArgs { + pub fn fill_values_with_prompt(self, default: &str) -> CompressorKeysArgs { + let path = self.path.unwrap_or_else(|| { + Prompt::new(MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT) + .default(default) + .ask() + }); + + CompressorKeysArgs { path: Some(path) } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs index e8c9cf1888d5..94fea1389d28 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs @@ -8,7 +8,10 @@ use url::Url; use xshell::Shell; use zksync_config::configs::fri_prover::CloudConnectionMode; -use super::init_bellman_cuda::InitBellmanCudaArgs; +use super::{ + compressor_keys::CompressorKeysArgs, init_bellman_cuda::InitBellmanCudaArgs, + setup_keys::SetupKeysArgs, +}; use crate::{ commands::prover::gcs::get_project_ids, 
consts::{DEFAULT_CREDENTIALS_FILE, DEFAULT_PROOF_STORE_DIR}, @@ -18,25 +21,24 @@ use crate::{ MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT, MSG_CREATE_GCS_BUCKET_NAME_PROMTP, MSG_CREATE_GCS_BUCKET_PROJECT_ID_NO_PROJECTS_PROMPT, MSG_CREATE_GCS_BUCKET_PROJECT_ID_PROMPT, MSG_CREATE_GCS_BUCKET_PROMPT, - MSG_DOWNLOAD_SETUP_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG, - MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT, + MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT, MSG_GETTING_PROOF_STORE_CONFIG, + MSG_GETTING_PUBLIC_STORE_CONFIG, MSG_INITIALIZE_BELLMAN_CUDA_PROMPT, + MSG_PROOF_STORE_CONFIG_PROMPT, MSG_PROOF_STORE_DIR_PROMPT, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_ERR, MSG_PROOF_STORE_GCS_BUCKET_BASE_URL_PROMPT, MSG_PROOF_STORE_GCS_CREDENTIALS_FILE_PROMPT, MSG_PROVER_DB_NAME_HELP, - MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEY_PATH_PROMPT, + MSG_PROVER_DB_URL_HELP, MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT, MSG_SETUP_KEYS_PROMPT, MSG_USE_DEFAULT_DATABASES_HELP, }, }; -#[derive(Debug, Clone, Serialize, Deserialize, Parser, Default)] +#[derive(Debug, Clone, Parser, Default)] pub struct ProverInitArgs { // Proof store object #[clap(long)] pub proof_store_dir: Option, #[clap(flatten)] - #[serde(flatten)] pub proof_store_gcs_config: ProofStorageGCSTmp, #[clap(flatten)] - #[serde(flatten)] pub create_gcs_bucket_config: ProofStorageGCSCreateBucketTmp, // Public store object @@ -45,20 +47,25 @@ pub struct ProverInitArgs { #[clap(long)] pub public_store_dir: Option, #[clap(flatten)] - #[serde(flatten)] pub public_store_gcs_config: PublicStorageGCSTmp, #[clap(flatten)] - #[serde(flatten)] pub public_create_gcs_bucket_config: PublicStorageGCSCreateBucketTmp, // Bellman cuda #[clap(flatten)] - #[serde(flatten)] pub bellman_cuda_config: InitBellmanCudaArgs, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub bellman_cuda: Option, + + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub setup_compressor_keys: Option, + #[clap(flatten)] + pub compressor_keys_args: CompressorKeysArgs, #[clap(flatten)] - #[serde(flatten)] - pub setup_key_config: SetupKeyConfigTmp, + pub setup_keys_args: SetupKeysArgs, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub setup_keys: Option, #[clap(long)] pub setup_database: Option, @@ -137,7 +144,7 @@ pub struct PublicStorageGCSCreateBucketTmp { } #[derive(Clone, Debug, Serialize, Deserialize, Parser, Default)] -pub struct SetupKeyConfigTmp { +pub struct SetupCompressorKeyConfigTmp { #[clap(long)] pub download_key: Option, #[clap(long)] @@ -171,12 +178,6 @@ pub enum ProofStorageConfig { GCSCreateBucket(ProofStorageGCSCreateBucket), } -#[derive(Debug, Clone)] -pub struct SetupKeyConfig { - pub download_key: bool, - pub setup_key_path: String, -} - #[derive(Debug, Clone)] pub struct ProverDatabaseConfig { pub database_config: DatabaseConfig, @@ -187,8 +188,9 @@ pub struct ProverDatabaseConfig { pub struct ProverInitArgsFinal { pub proof_store: ProofStorageConfig, pub public_store: Option, - pub setup_key_config: SetupKeyConfig, - pub bellman_cuda_config: InitBellmanCudaArgs, + pub compressor_key_args: Option, + pub setup_keys: Option, + pub bellman_cuda_config: Option, pub cloud_type: CloudConnectionMode, pub database_config: Option, } @@ -197,20 +199,23 @@ impl ProverInitArgs { pub(crate) fn fill_values_with_prompt( &self, shell: &Shell, - setup_key_path: &str, + default_compressor_key_path: &str, chain_config: &ChainConfig, ) -> anyhow::Result { let proof_store = 
self.fill_proof_storage_values_with_prompt(shell)?; let public_store = self.fill_public_storage_values_with_prompt(shell)?; - let setup_key_config = self.fill_setup_key_values_with_prompt(setup_key_path); - let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt()?; + let compressor_key_args = + self.fill_setup_compressor_key_values_with_prompt(default_compressor_key_path); + let bellman_cuda_config = self.fill_bellman_cuda_values_with_prompt(); let cloud_type = self.get_cloud_type_with_prompt(); let database_config = self.fill_database_values_with_prompt(chain_config); + let setup_keys = self.fill_setup_keys_values_with_prompt(); Ok(ProverInitArgsFinal { proof_store, public_store, - setup_key_config, + compressor_key_args, + setup_keys, bellman_cuda_config, cloud_type, database_config, @@ -336,29 +341,38 @@ impl ProverInitArgs { } } - fn fill_setup_key_values_with_prompt(&self, setup_key_path: &str) -> SetupKeyConfig { - let download_key = self - .clone() - .setup_key_config - .download_key - .unwrap_or_else(|| { - PromptConfirm::new(MSG_DOWNLOAD_SETUP_KEY_PROMPT) - .default(true) - .ask() - }); - let setup_key_path = self - .clone() - .setup_key_config - .setup_key_path - .unwrap_or_else(|| { - Prompt::new(MSG_SETUP_KEY_PATH_PROMPT) - .default(setup_key_path) - .ask() - }); + fn fill_setup_compressor_key_values_with_prompt( + &self, + default_path: &str, + ) -> Option { + let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) + .default(false) + .ask() + }); - SetupKeyConfig { - download_key, - setup_key_path, + if download_key { + Some( + self.compressor_keys_args + .clone() + .fill_values_with_prompt(default_path), + ) + } else { + None + } + } + + fn fill_setup_keys_values_with_prompt(&self) -> Option { + let args = self.setup_keys_args.clone(); + + if self.setup_keys.unwrap_or_else(|| { + PromptConfirm::new(MSG_SETUP_KEYS_PROMPT) + .default(false) + .ask() + }) { + Some(args) + } else { + None } } @@ -460,8 +474,17 @@ impl ProverInitArgs { }) } - fn fill_bellman_cuda_values_with_prompt(&self) -> anyhow::Result { - self.bellman_cuda_config.clone().fill_values_with_prompt() + fn fill_bellman_cuda_values_with_prompt(&self) -> Option { + let args = self.bellman_cuda_config.clone(); + if self.bellman_cuda.unwrap_or_else(|| { + PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT) + .default(false) + .ask() + }) { + Some(args) + } else { + None + } } fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs index 848457c53271..ba204b0be9e9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs @@ -30,7 +30,7 @@ impl std::fmt::Display for BellmanCudaPathSelection { } impl InitBellmanCudaArgs { - pub fn fill_values_with_prompt(self) -> anyhow::Result { + pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs { let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| { match PromptSelect::new( MSG_BELLMAN_CUDA_ORIGIN_SELECT, @@ -43,8 +43,8 @@ impl InitBellmanCudaArgs { } }); - Ok(InitBellmanCudaArgs { + InitBellmanCudaArgs { bellman_cuda_dir: Some(bellman_cuda_dir), - }) + } } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs 
b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs index 903ecdb81d91..39391977b843 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs @@ -1,3 +1,4 @@ +pub mod compressor_keys; pub mod init; pub mod init_bellman_cuda; pub mod run; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs index 4839c03eb429..155977b8812a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs @@ -2,7 +2,7 @@ use clap::{Parser, ValueEnum}; use common::PromptSelect; use strum::{EnumIter, IntoEnumIterator}; -use crate::messages::{MSG_SETUP_KEYS_DOWNLOAD_HELP, MSG_SETUP_KEYS_REGION_PROMPT}; +use crate::messages::{MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, MSG_SETUP_KEYS_REGION_PROMPT}; #[derive(Debug, Clone, Parser, Default)] pub struct SetupKeysArgs { @@ -33,9 +33,9 @@ pub enum Region { impl SetupKeysArgs { pub fn fill_values_with_prompt(self) -> SetupKeysArgsFinal { - let mode = self - .mode - .unwrap_or_else(|| PromptSelect::new(MSG_SETUP_KEYS_DOWNLOAD_HELP, Mode::iter()).ask()); + let mode = self.mode.unwrap_or_else(|| { + PromptSelect::new(MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT, Mode::iter()).ask() + }); if mode == Mode::Download { let region = self.region.unwrap_or_else(|| { diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs new file mode 100644 index 000000000000..1f39c91a2e2e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs @@ -0,0 +1,73 @@ +use anyhow::Context; +use common::{ + check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITES, +}; +use config::{EcosystemConfig, GeneralConfig}; +use xshell::{cmd, Shell}; + +use super::{args::compressor_keys::CompressorKeysArgs, utils::get_link_to_prover}; +use crate::messages::{ + MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER, + MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_SETUP_KEY_PATH_ERROR, +}; + +pub(crate) async fn run(shell: &Shell, args: CompressorKeysArgs) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let mut general_config = chain_config.get_general_config()?; + + let default_path = get_default_compressor_keys_path(&ecosystem_config)?; + let args = args.fill_values_with_prompt(&default_path); + + download_compressor_key( + shell, + &mut general_config, + &args.path.context(MSG_SETUP_KEY_PATH_ERROR)?, + )?; + + chain_config.save_general_config(&general_config)?; + + Ok(()) +} + +pub(crate) fn download_compressor_key( + shell: &Shell, + general_config: &mut GeneralConfig, + path: &str, +) -> anyhow::Result<()> { + check_prerequisites(shell, &WGET_PREREQUISITES, false); + let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); + let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config + .proof_compressor_config + .as_ref() + .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR) + .clone(); + compressor_config.universal_setup_path = path.to_string(); + general_config.proof_compressor_config = Some(compressor_config.clone()); + + let url = 
compressor_config.universal_setup_download_url; + let path = std::path::Path::new(path); + let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); + let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); + + Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; + + if file_name != "setup_2^24.key" { + Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; + } + + spinner.finish(); + Ok(()) +} + +pub fn get_default_compressor_keys_path( + ecosystem_config: &EcosystemConfig, +) -> anyhow::Result { + let link_to_prover = get_link_to_prover(ecosystem_config); + let path = link_to_prover.join("keys/setup/setup_2^24.key"); + let string = path.to_str().unwrap(); + + Ok(String::from(string)) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index 7aadd04bf6b7..c8636381f203 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -2,47 +2,41 @@ use std::path::PathBuf; use anyhow::Context; use common::{ - check_prerequisites, - cmd::Cmd, config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, spinner::Spinner, - WGET_PREREQUISITES, }; use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig}; -use xshell::{cmd, Shell}; -use zksync_config::{ - configs::{object_store::ObjectStoreMode, GeneralConfig}, - ObjectStoreConfig, -}; +use xshell::Shell; +use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; use super::{ args::init::{ProofStorageConfig, ProverInitArgs}, + compressor_keys::{download_compressor_key, get_default_compressor_keys_path}, gcs::create_gcs_bucket, init_bellman_cuda::run as init_bellman_cuda, - utils::get_link_to_prover, + setup_keys, }; use crate::{ consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES}, messages::{ - MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_KEY_SPINNER, - MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_GENERAL_CONFIG_NOT_FOUND_ERR, - MSG_INITIALIZING_DATABASES_SPINNER, MSG_INITIALIZING_PROVER_DATABASE, - MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_PROVER_CONFIG_NOT_FOUND_ERR, - MSG_PROVER_INITIALIZED, MSG_SETUP_KEY_PATH_ERROR, + MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, + MSG_GENERAL_CONFIG_NOT_FOUND_ERR, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_INITIALIZING_PROVER_DATABASE, MSG_PROVER_CONFIG_NOT_FOUND_ERR, MSG_PROVER_INITIALIZED, + MSG_SETUP_KEY_PATH_ERROR, }, }; pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let setup_key_path = get_default_setup_key_path(&ecosystem_config)?; + let default_compressor_key_path = get_default_compressor_keys_path(&ecosystem_config)?; let chain_config = ecosystem_config .load_chain(global_config().chain_name.clone()) .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let args = args.fill_values_with_prompt(shell, &setup_key_path, &chain_config)?; + let args = args.fill_values_with_prompt(shell, &default_compressor_key_path, &chain_config)?; if chain_config.get_general_config().is_err() || chain_config.get_secrets_config().is_err() { copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; @@ -55,12 +49,13 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( let proof_object_store_config = get_object_store_config(shell, Some(args.proof_store))?; let public_object_store_config = 
get_object_store_config(shell, args.public_store)?; - if args.setup_key_config.download_key { - download_setup_key( - shell, - &general_config, - &args.setup_key_config.setup_key_path, - )?; + if let Some(args) = args.compressor_key_args { + let path = args.path.context(MSG_SETUP_KEY_PATH_ERROR)?; + download_compressor_key(shell, &mut general_config, &path)?; + } + + if let Some(args) = args.setup_keys { + setup_keys::run(args, shell).await?; } let mut prover_config = general_config @@ -78,15 +73,11 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( prover_config.cloud_type = args.cloud_type; general_config.prover_config = Some(prover_config); - let mut proof_compressor_config = general_config - .proof_compressor_config - .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR); - proof_compressor_config.universal_setup_path = args.setup_key_config.setup_key_path; - general_config.proof_compressor_config = Some(proof_compressor_config); - chain_config.save_general_config(&general_config)?; - init_bellman_cuda(shell, args.bellman_cuda_config).await?; + if let Some(args) = args.bellman_cuda_config { + init_bellman_cuda(shell, args).await?; + } if let Some(prover_db) = &args.database_config { let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); @@ -109,41 +100,6 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( Ok(()) } -fn download_setup_key( - shell: &Shell, - general_config: &GeneralConfig, - path: &str, -) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITES, false); - let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_KEY_SPINNER); - let compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config - .proof_compressor_config - .as_ref() - .expect(MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR) - .clone(); - let url = compressor_config.universal_setup_download_url; - let path = std::path::Path::new(path); - let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); - let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); - - Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; - - if file_name != "setup_2^24.key" { - Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; - } - - spinner.finish(); - Ok(()) -} - -fn get_default_setup_key_path(ecosystem_config: &EcosystemConfig) -> anyhow::Result { - let link_to_prover = get_link_to_prover(ecosystem_config); - let path = link_to_prover.join("keys/setup/setup_2^24.key"); - let string = path.to_str().unwrap(); - - Ok(String::from(string)) -} - fn get_object_store_config( shell: &Shell, config: Option, diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs index 5ed1473a33f6..615ef841488b 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs @@ -17,7 +17,7 @@ pub(crate) async fn run(shell: &Shell, args: InitBellmanCudaArgs) -> anyhow::Res let mut ecosystem_config = EcosystemConfig::from_file(shell)?; - let args = args.fill_values_with_prompt()?; + let args = args.fill_values_with_prompt(); let bellman_cuda_dir = args.bellman_cuda_dir.unwrap_or("".to_string()); let bellman_cuda_dir = if bellman_cuda_dir.is_empty() { diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs index 4fb90dcfd020..2b771c8ad201 100644 --- 
a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs @@ -1,10 +1,14 @@ -use args::{init::ProverInitArgs, init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs}; +use args::{ + compressor_keys::CompressorKeysArgs, init::ProverInitArgs, + init_bellman_cuda::InitBellmanCudaArgs, run::ProverRunArgs, +}; use clap::Subcommand; use xshell::Shell; use crate::commands::prover::args::setup_keys::SetupKeysArgs; mod args; +mod compressor_keys; mod gcs; mod init; mod init_bellman_cuda; @@ -24,6 +28,9 @@ pub enum ProverCommands { /// Initialize bellman-cuda #[command(alias = "cuda")] InitBellmanCuda(Box), + /// Download compressor keys + #[command(alias = "ck")] + CompressorKeys(CompressorKeysArgs), } pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> { @@ -32,5 +39,6 @@ pub(crate) async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<( ProverCommands::SetupKeys(args) => setup_keys::run(args, shell).await, ProverCommands::Run(args) => run::run(args, shell).await, ProverCommands::InitBellmanCuda(args) => init_bellman_cuda::run(shell, *args).await, + ProverCommands::CompressorKeys(args) => compressor_keys::run(shell, args).await, } } diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index cca3e3b549b1..99af684010a9 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -5,7 +5,7 @@ use ethers::{ utils::format_ether, }; -pub(super) const MSG_SETUP_KEYS_DOWNLOAD_HELP: &str = +pub(super) const MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT: &str = "Do you want to download the setup keys or generate them?"; pub(super) const MSG_SETUP_KEYS_REGION_PROMPT: &str = "From which region you want setup keys to be downloaded?"; @@ -344,9 +344,13 @@ pub(super) const MSG_CREATE_GCS_BUCKET_NAME_PROMTP: &str = "What do you want to pub(super) const MSG_CREATE_GCS_BUCKET_LOCATION_PROMPT: &str = "What location do you want to use? 
Find available locations at https://cloud.google.com/storage/docs/locations"; pub(super) const MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR: &str = "Proof compressor config not found"; -pub(super) const MSG_DOWNLOADING_SETUP_KEY_SPINNER: &str = "Downloading setup key..."; -pub(super) const MSG_DOWNLOAD_SETUP_KEY_PROMPT: &str = "Do you want to download the setup key?"; -pub(super) const MSG_SETUP_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; +pub(super) const MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER: &str = + "Downloading compressor setup key..."; +pub(super) const MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT: &str = + "Do you want to download the setup key for compressor?"; +pub(super) const MSG_INITIALIZE_BELLMAN_CUDA_PROMPT: &str = + "Do you want to initialize bellman-cuda?"; +pub(super) const MSG_SETUP_COMPRESSOR_KEY_PATH_PROMPT: &str = "Provide the path to the setup key:"; pub(super) const MSG_GETTING_GCP_PROJECTS_SPINNER: &str = "Getting GCP projects..."; pub(super) const MSG_GETTING_PROOF_STORE_CONFIG: &str = "Getting proof store configuration..."; pub(super) const MSG_GETTING_PUBLIC_STORE_CONFIG: &str = "Getting public store configuration..."; @@ -368,6 +372,7 @@ pub(super) const MSG_BELLMAN_CUDA_SELECTION_CLONE: &str = "Clone for me (recomme pub(super) const MSG_BELLMAN_CUDA_SELECTION_PATH: &str = "I have the code already"; pub(super) const MSG_CLOUD_TYPE_PROMPT: &str = "Select the cloud connection mode:"; pub(super) const MSG_THREADS_PROMPT: &str = "Provide the number of threads:"; +pub(super) const MSG_SETUP_KEYS_PROMPT: &str = "Do you want to setup keys?"; pub(super) fn msg_bucket_created(bucket_name: &str) -> String { format!("Bucket created successfully with url: gs://{bucket_name}")

From bcb176b8bb033d9cc0fd6bf7e971930c97c91d81 Mon Sep 17 00:00:00 2001
From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com>
Date: Tue, 10 Sep 2024 10:46:25 +0300
Subject: [PATCH 041/116] feat(zk_toolbox): Allow running docker images for provers (#2800)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Add a `--docker` flag for `zk_inception prover run`, which allows prover components to be run from Docker images.

## Why ❔

To decrease setup time and improve UX.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
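Example usage (an illustrative sketch, not taken from this PR: the component names come from the README section updated below, and the `--max-allocation` value is an arbitrary placeholder):

```bash
# Run the prover gateway from its published Docker image instead of a local cargo build
zk_inception prover run --component gateway --docker true

# Run the GPU prover as a locally built binary, capping its memory allocation (in bytes)
zk_inception prover run --component prover --docker false --max-allocation 17179869184
```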
--- .github/workflows/build-docker-from-tag.yml | 16 +- docker/proof-fri-gpu-compressor/Dockerfile | 3 +- zk_toolbox/crates/zk_inception/README.md | 7 + .../src/commands/prover/args/run.rs | 139 +++++++++++- .../zk_inception/src/commands/prover/run.rs | 197 +++++++++--------- zk_toolbox/crates/zk_inception/src/consts.rs | 15 ++ .../crates/zk_inception/src/messages.rs | 1 + 7 files changed, 265 insertions(+), 113 deletions(-) diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index cd222a6e43bb..791f44117477 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -23,7 +23,7 @@ concurrency: docker-build jobs: setup: name: Setup - runs-on: [ubuntu-latest] + runs-on: [ ubuntu-latest ] outputs: image_tag_suffix: ${{ steps.set.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -48,7 +48,7 @@ jobs: build-push-core-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-core-template.yml if: contains(github.ref_name, 'core') secrets: @@ -60,7 +60,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-tee-prover-template.yml if: contains(github.ref_name, 'core') secrets: @@ -72,7 +72,7 @@ jobs: build-push-contract-verifier: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-contract-verifier-template.yml if: contains(github.ref_name, 'contract_verifier') secrets: @@ -83,20 +83,20 @@ jobs: build-push-prover-images: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-prover-template.yml if: contains(github.ref_name, 'prover') with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} build-push-witness-generator-image-avx512: name: Build and push image - needs: [setup] + needs: [ setup ] uses: ./.github/workflows/build-witness-generator-template.yml if: contains(github.ref_name, 'prover') with: @@ -110,7 +110,7 @@ jobs: build-gar-prover-fri-gpu: name: Build GAR prover FRI GPU - needs: [setup, build-push-prover-images] + needs: [ setup, build-push-prover-images ] uses: ./.github/workflows/build-prover-fri-gpu-gar.yml if: contains(github.ref_name, 'prover') with: diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index a3d92d113cde..45f2ffa51b04 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -4,8 +4,7 @@ FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 -ARG A100_CUDA_ARCH=80 -ENV CUDAARCHS=${CUDA_ARCH};${A100_CUDA_ARCH} +ENV CUDAARCHS=${CUDA_ARCH} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 25eeff40247b..904b1421e3a0 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -504,12 +504,19 @@ Run prover Possible values: `gateway`, `witness-generator`, `witness-vector-generator`, 
`prover`, `compressor`, `prover-job-monitor` +- `--docker` - Whether to run image of the component instead of binary. + + Possible values: `true`, `false` + - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` - `--threads ` +- `--max-allocation ` - in case you are running prover component, the value limits maximum + memory allocation of it in bytes. + ## `zk_inception prover init-bellman-cuda` Initialize bellman-cuda diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs index 6bdd62c1d488..751cc48074fe 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs @@ -1,8 +1,22 @@ +use anyhow::anyhow; use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; +use config::ChainConfig; use strum::{EnumIter, IntoEnumIterator}; -use crate::messages::{MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT}; +use crate::{ + consts::{ + COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, + PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, + PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, + WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, + WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, + }, + messages::{ + MSG_ROUND_SELECT_PROMPT, MSG_RUN_COMPONENT_PROMPT, MSG_THREADS_PROMPT, + MSG_WITNESS_GENERATOR_ROUND_ERR, + }, +}; #[derive(Debug, Clone, Parser, Default)] pub struct ProverRunArgs { @@ -12,6 +26,10 @@ pub struct ProverRunArgs { pub witness_generator_args: WitnessGeneratorArgs, #[clap(flatten)] pub witness_vector_generator_args: WitnessVectorGeneratorArgs, + #[clap(flatten)] + pub fri_prover_args: FriProverRunArgs, + #[clap(long)] + pub docker: Option, } #[derive( @@ -32,6 +50,108 @@ pub enum ProverComponent { ProverJobMonitor, } +impl ProverComponent { + pub fn image_name(&self) -> &'static str { + match self { + Self::Gateway => PROVER_GATEWAY_DOCKER_IMAGE, + Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, + Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, + Self::Prover => PROVER_DOCKER_IMAGE, + Self::Compressor => COMPRESSOR_DOCKER_IMAGE, + Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE, + } + } + + pub fn binary_name(&self) -> &'static str { + match self { + Self::Gateway => PROVER_GATEWAY_BINARY_NAME, + Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, + Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, + Self::Prover => PROVER_BINARY_NAME, + Self::Compressor => COMPRESSOR_BINARY_NAME, + Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, + } + } + + pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result> { + let mut application_args = vec![]; + + if self == &Self::Prover || self == &Self::Compressor { + if in_docker { + application_args.push("--gpus=all".to_string()); + } else { + application_args.push("--features=gpu".to_string()); + } + } + + Ok(application_args) + } + + pub fn get_additional_args( + &self, + in_docker: bool, + args: ProverRunArgs, + chain: &ChainConfig, + ) -> anyhow::Result> { + let mut additional_args = vec![]; + if in_docker { + additional_args.push("--config-path=/configs/general.yaml".to_string()); + additional_args.push("--secrets-path=/configs/secrets.yaml".to_string()); + } else { + let 
general_config = chain + .path_to_general_config() + .into_os_string() + .into_string() + .map_err(|_| anyhow!("Failed to convert path to string"))?; + let secrets_config = chain + .path_to_secrets_config() + .into_os_string() + .into_string() + .map_err(|_| anyhow!("Failed to convert path to string"))?; + + additional_args.push(format!("--config-path={}", general_config)); + additional_args.push(format!("--secrets-path={}", secrets_config)); + } + + match self { + Self::WitnessGenerator => { + additional_args.push( + match args + .witness_generator_args + .round + .expect(MSG_WITNESS_GENERATOR_ROUND_ERR) + { + WitnessGeneratorRound::AllRounds => "--all_rounds", + WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits", + WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation", + WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation", + WitnessGeneratorRound::RecursionTip => "--round=recursion_tip", + WitnessGeneratorRound::Scheduler => "--round=scheduler", + } + .to_string(), + ); + } + Self::WitnessVectorGenerator => { + additional_args.push(format!( + "--threads={}", + args.witness_vector_generator_args.threads.unwrap_or(1) + )); + } + Self::Prover => { + if args.fri_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.fri_prover_args.max_allocation.unwrap() + )); + }; + } + _ => {} + }; + + Ok(additional_args) + } +} + #[derive(Debug, Clone, Parser, Default)] pub struct WitnessGeneratorArgs { #[clap(long)] @@ -76,8 +196,15 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct FriProverRunArgs { + /// Memory allocation limit in bytes (for prover component) + #[clap(long)] + pub max_allocation: Option, +} + impl ProverRunArgs { - pub fn fill_values_with_prompt(&self) -> anyhow::Result { + pub fn fill_values_with_prompt(self) -> anyhow::Result { let component = self.component.unwrap_or_else(|| { PromptSelect::new(MSG_RUN_COMPONENT_PROMPT, ProverComponent::iter()).ask() }); @@ -90,10 +217,18 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { + Prompt::new("Do you want to run Docker image for the component?") + .default("false") + .ask() + }); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, witness_vector_generator_args, + fri_prover_args: self.fri_prover_args, + docker: Some(docker), }) } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index a819c3322a89..78116e40d6c7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -1,22 +1,21 @@ -use anyhow::Context; +use std::path::PathBuf; + +use anyhow::{anyhow, Context}; use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; -use config::{ChainConfig, EcosystemConfig}; +use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{ - args::run::{ - ProverComponent, ProverRunArgs, WitnessGeneratorArgs, WitnessGeneratorRound, - WitnessVectorGeneratorArgs, - }, + args::run::{ProverComponent, ProverRunArgs}, utils::get_link_to_prover, }; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, 
MSG_RUNNING_PROVER_JOB_MONITOR, - MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, - MSG_RUNNING_WITNESS_VECTOR_GENERATOR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, - MSG_WITNESS_GENERATOR_ROUND_ERR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, }; pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<()> { @@ -29,114 +28,110 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let link_to_prover = get_link_to_prover(&ecosystem_config); shell.change_dir(link_to_prover.clone()); - match args.component { - Some(ProverComponent::Gateway) => run_gateway(shell, &chain)?, - Some(ProverComponent::WitnessGenerator) => { - run_witness_generator(shell, &chain, args.witness_generator_args)? + let component = args.component.context(anyhow!(MSG_MISSING_COMPONENT_ERR))?; + let in_docker = args.docker.unwrap_or(false); + + let application_args = component.get_application_args(in_docker)?; + let additional_args = component.get_additional_args(in_docker, args, &chain)?; + + let (message, error) = match component { + ProverComponent::WitnessGenerator => ( + MSG_RUNNING_WITNESS_GENERATOR, + MSG_RUNNING_WITNESS_GENERATOR_ERR, + ), + ProverComponent::WitnessVectorGenerator => ( + MSG_RUNNING_WITNESS_VECTOR_GENERATOR, + MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR, + ), + ProverComponent::Prover => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } - Some(ProverComponent::WitnessVectorGenerator) => { - run_witness_vector_generator(shell, &chain, args.witness_vector_generator_args)? + ProverComponent::Compressor => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + shell.set_var( + "BELLMAN_CUDA_DIR", + ecosystem_config + .bellman_cuda_dir + .clone() + .expect(MSG_BELLMAN_CUDA_DIR_ERR), + ); + } + (MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR) } - Some(ProverComponent::Prover) => run_prover(shell, &chain)?, - Some(ProverComponent::Compressor) => run_compressor(shell, &chain, &ecosystem_config)?, - Some(ProverComponent::ProverJobMonitor) => run_prover_job_monitor(shell, &chain)?, - None => anyhow::bail!(MSG_MISSING_COMPONENT_ERR), + ProverComponent::ProverJobMonitor => ( + MSG_RUNNING_PROVER_JOB_MONITOR, + MSG_RUNNING_PROVER_JOB_MONITOR_ERR, + ), + ProverComponent::Gateway => (MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR), + }; + + if in_docker { + let path_to_configs = chain.configs.clone(); + let path_to_prover = get_link_to_prover(&ecosystem_config); + run_dockerized_component( + shell, + component.image_name(), + &application_args, + &additional_args, + message, + error, + &path_to_configs, + &path_to_prover, + )? + } else { + run_binary_component( + shell, + component.binary_name(), + &application_args, + &additional_args, + message, + error, + )? 
} Ok(()) } -fn run_gateway(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_GATEWAY); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_fri_gateway -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_GATEWAY_ERR) -} - -fn run_witness_generator( +#[allow(clippy::too_many_arguments)] +fn run_dockerized_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessGeneratorArgs, + image_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, + path_to_configs: &PathBuf, + path_to_prover: &PathBuf, ) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_WITNESS_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - let round = args.round.expect(MSG_WITNESS_GENERATOR_ROUND_ERR); + logger::info(message); - let round_str = match round { - WitnessGeneratorRound::AllRounds => "--all_rounds", - WitnessGeneratorRound::BasicCircuits => "--round=basic_circuits", - WitnessGeneratorRound::LeafAggregation => "--round=leaf_aggregation", - WitnessGeneratorRound::NodeAggregation => "--round=node_aggregation", - WitnessGeneratorRound::RecursionTip => "--round=recursion_tip", - WitnessGeneratorRound::Scheduler => "--round=scheduler", - }; + let mut cmd = Cmd::new(cmd!( + shell, + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + )); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_generator -- {round_str} --config-path={config_path} --secrets-path={secrets_path}")); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_GENERATOR_ERR) + cmd.run().context(error) } -fn run_witness_vector_generator( +fn run_binary_component( shell: &Shell, - chain: &ChainConfig, - args: WitnessVectorGeneratorArgs, + binary_name: &str, + application_args: &[String], + args: &[String], + message: &'static str, + error: &'static str, ) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_WITNESS_VECTOR_GENERATOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let threads = args.threads.unwrap_or(1).to_string(); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_witness_vector_generator -- --config-path={config_path} --secrets-path={secrets_path} --threads={threads}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR) -} - -fn run_prover(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - check_prerequisites(shell, &GPU_PREREQUISITES, false); - logger::info(MSG_RUNNING_PROVER); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - let mut cmd = Cmd::new( - cmd!(shell, "cargo run --features gpu --release --bin zksync_prover_fri -- --config-path={config_path} --secrets-path={secrets_path}"), - ); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_ERR) -} - -fn run_compressor( - shell: &Shell, - chain: &ChainConfig, - ecosystem: &EcosystemConfig, -) -> anyhow::Result<()> { - check_prerequisites(shell, &GPU_PREREQUISITES, false); - logger::info(MSG_RUNNING_COMPRESSOR); - let 
config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); - - shell.set_var( - "BELLMAN_CUDA_DIR", - ecosystem - .bellman_cuda_dir - .clone() - .expect(MSG_BELLMAN_CUDA_DIR_ERR), - ); - - let mut cmd = Cmd::new(cmd!(shell, "cargo run --features gpu --release --bin zksync_proof_fri_compressor -- --config-path={config_path} --secrets-path={secrets_path}")); - cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_COMPRESSOR_ERR) -} - -fn run_prover_job_monitor(shell: &Shell, chain: &ChainConfig) -> anyhow::Result<()> { - logger::info(MSG_RUNNING_PROVER_JOB_MONITOR); - let config_path = chain.path_to_general_config(); - let secrets_path = chain.path_to_secrets_config(); + logger::info(message); - let mut cmd = Cmd::new(cmd!(shell, "cargo run --release --bin zksync_prover_job_monitor -- --config-path={config_path} --secrets-path={secrets_path}")); + let mut cmd = Cmd::new(cmd!( + shell, + "cargo run {application_args...} --release --bin {binary_name} -- {args...}" + )); cmd = cmd.with_force_run(); - cmd.run().context(MSG_RUNNING_PROVER_JOB_MONITOR) + cmd.run().context(error) } diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 7db976c61033..72c8948a65d1 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -15,3 +15,18 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; /// Path to the JS runtime config for the dapp-portal docker container to be mounted to pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; + +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = + "matterlabs/witness-vector-generator:latest2.0"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; + +pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; +pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; +pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; +pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; +pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 99af684010a9..6f94a7b102a4 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -311,6 +311,7 @@ pub(super) const MSG_GENERATING_SK_SPINNER: &str = "Generating setup keys..."; pub(super) const MSG_SK_GENERATED: &str = "Setup keys generated successfully"; pub(super) const MSG_MISSING_COMPONENT_ERR: &str = "Missing component"; pub(super) const MSG_RUNNING_PROVER_GATEWAY: &str = "Running gateway"; +pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR_ERR: &str = "Failed to run prover job monitor"; pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job monitor"; pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness 
generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; From b8925ddacb7d86081d90c86933502e524da588e1 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 10 Sep 2024 11:16:20 +0300 Subject: [PATCH 042/116] chore: Add README for verified sources fetcher (#2829) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adds README for verified sources fetcher ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/bin/verified_sources_fetcher/README.md | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 core/bin/verified_sources_fetcher/README.md diff --git a/core/bin/verified_sources_fetcher/README.md b/core/bin/verified_sources_fetcher/README.md new file mode 100644 index 000000000000..0abddb7a8843 --- /dev/null +++ b/core/bin/verified_sources_fetcher/README.md @@ -0,0 +1,4 @@ +# Verified sources fetcher + +This tool downloads verified contract sources from SQL database from `contract_verification_requests` table. Then it +saves sources and compilation settings to files. From fb57d05ec7e1c782863b018c23814b138b1f13a3 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Tue, 10 Sep 2024 13:24:50 +0400 Subject: [PATCH 043/116] chore(ci): Limit tokio/rayon pools for zk_toolbox CI (#2828) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Limits rayon threadpool size to 2 and tokio threadpool size to 4 in zk_toolbox CI. I have checked locally, and with this configuration time to run integration tests is pretty close to the default configuration. ## Why ❔ By default, both tokio and rayon will try to use all the CPUs. When we run multiple Rust binaries at the same time (3 servers and 3 ENs in our case), it causes a lot of conflict for resources, regardless of the number of CPUs. --- .github/workflows/ci-zk-toolbox-reusable.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 5f82df646c13..78e1e485cafc 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -4,6 +4,11 @@ on: env: CLICOLOR: 1 + # We run multiple binaries in parallel, and by default they will try to utilize all the + # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of + # async work (tokio), so we prioritize tokio. + TOKIO_WORKER_THREADS: 4 + RAYON_NUM_THREADS: 2 jobs: lint: @@ -11,7 +16,7 @@ jobs: uses: ./.github/workflows/ci-core-lint-reusable.yml tests: - runs-on: [ matterlabs-ci-runner ] + runs-on: [ matterlabs-ci-runner-ultra-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: From 92dde039ee8a0bc08e2019b7fa6f243a34d9816f Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Tue, 10 Sep 2024 12:08:53 +0200 Subject: [PATCH 044/116] feat: attester committees data extractor (BFT-434) (#2684) Extraction of the attester committee from consensus registry state. If consensus registry address is not specified, we fall back to attester committee from genesis. 
This pr does NOT enable the dynamic attestation, as the registry address needs to be added to the main node config first. --------- Co-authored-by: Moshe Shababo <17073733+moshababo@users.noreply.github.com> Co-authored-by: Igor Aleksanov --- Cargo.lock | 67 ++-- Cargo.toml | 20 +- core/bin/external_node/src/node_builder.rs | 8 +- core/lib/config/src/configs/consensus.rs | 4 +- core/lib/config/src/testonly.rs | 19 +- ...9ed36420c15081ff5f60da0a1c769c2dbc542.json | 20 - ...38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json | 26 ++ ...b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json} | 5 +- ...f2ede5e22b0bbd8bc910cb36a91ed992bde1.json} | 4 +- ...48aa4e4a804519bcf68e14c5bbb0f58939da1.json | 22 ++ ...37978579ba22eec525912c4aeeb235c3b984c.json | 20 - ...987e056c2bf423054e40236aba60f4d3b8a97.json | 20 + ...6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json | 15 + ...d_l1_batches_consensus_committees.down.sql | 3 + ...add_l1_batches_consensus_committees.up.sql | 8 + core/lib/dal/src/consensus/mod.rs | 51 +++ core/lib/dal/src/consensus/proto/mod.proto | 10 + core/lib/dal/src/consensus_dal.rs | 356 ++++++++++++------ core/lib/env_config/src/contracts.rs | 1 + core/lib/protobuf_config/src/consensus.rs | 9 +- .../src/proto/core/consensus.proto | 2 + core/lib/types/src/api/en.rs | 13 + core/lib/web3_decl/src/error.rs | 23 +- core/lib/web3_decl/src/namespaces/en.rs | 3 + .../src/execution_sandbox/execute.rs | 6 +- .../api_server/src/execution_sandbox/mod.rs | 10 +- .../src/execution_sandbox/tracers.rs | 2 +- core/node/api_server/src/tx_sender/mod.rs | 62 +-- .../web3/backend_jsonrpsee/namespaces/en.rs | 6 + .../node/api_server/src/web3/namespaces/en.rs | 41 +- core/node/consensus/Cargo.toml | 13 +- core/node/consensus/src/abi.rs | 133 +++++++ core/node/consensus/src/config.rs | 19 +- core/node/consensus/src/en.rs | 164 +++++--- core/node/consensus/src/era.rs | 4 +- core/node/consensus/src/lib.rs | 3 + core/node/consensus/src/mn.rs | 67 ++-- core/node/consensus/src/registry/abi.rs | 225 +++++++++++ core/node/consensus/src/registry/mod.rs | 80 ++++ core/node/consensus/src/registry/testonly.rs | 118 ++++++ core/node/consensus/src/registry/tests.rs | 91 +++++ core/node/consensus/src/storage/connection.rs | 105 ++++-- core/node/consensus/src/storage/store.rs | 5 +- core/node/consensus/src/storage/testonly.rs | 30 +- core/node/consensus/src/testonly.rs | 81 ++-- core/node/consensus/src/tests/attestation.rs | 160 +++++--- core/node/consensus/src/tests/batch.rs | 12 +- core/node/consensus/src/tests/mod.rs | 58 +-- core/node/consensus/src/vm.rs | 96 +++++ core/node/node_framework/Cargo.toml | 1 + .../layers/consensus/external_node.rs | 4 + core/node/node_sync/src/client.rs | 19 - core/node/node_sync/src/testonly.rs | 12 - core/node/state_keeper/src/testonly/mod.rs | 2 +- prover/Cargo.lock | 28 +- zk_toolbox/Cargo.lock | 16 +- zk_toolbox/Cargo.toml | 2 +- 57 files changed, 1845 insertions(+), 559 deletions(-) delete mode 100644 core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json create mode 100644 core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json rename core/lib/dal/.sqlx/{query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json => query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json} (51%) rename core/lib/dal/.sqlx/{query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json => query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json} (58%) create mode 100644 
core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json delete mode 100644 core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json create mode 100644 core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json create mode 100644 core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json create mode 100644 core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql create mode 100644 core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql create mode 100644 core/node/consensus/src/abi.rs create mode 100644 core/node/consensus/src/registry/abi.rs create mode 100644 core/node/consensus/src/registry/mod.rs create mode 100644 core/node/consensus/src/registry/testonly.rs create mode 100644 core/node/consensus/src/registry/tests.rs create mode 100644 core/node/consensus/src/vm.rs diff --git a/Cargo.lock b/Cargo.lock index 2d6263f7ab4e..d5abe5c3b151 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -778,9 +778,9 @@ dependencies = [ [[package]] name = "build_html" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3108fe6fe7ac796fb7625bdde8fa2b67b5a7731496251ca57c7b8cadd78a16a1" +checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" @@ -5575,9 +5575,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.20" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -5704,9 +5704,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537" +checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" dependencies = [ "serde_derive", ] @@ -5723,9 +5723,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.189" +version = "1.0.208" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5" +checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -8188,9 +8188,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -8225,9 +8225,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e22e3bfe96fa30a57313e774a5e8c74ffee884abff57ecacc10e8832315ee8a2" +checksum = "b1dcab481683131c093271c19602bd495b1d682f7a94f764f2227111a0a104f0" dependencies = [ "anyhow", "async-trait", @@ -8247,9 +8247,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -8271,13 +8271,14 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7fcde1275970a6b8a33ea2ade5cc994d6392f95509ce374e0e7a26cde4cd6db" +checksum = "216e3d9f3df8c119e037e44c41db12fa6448dafbf1eaf5015d13b22400866980" dependencies = [ "anyhow", "async-trait", "rand 0.8.5", + "semver", "tracing", "vise", "zksync_concurrency", @@ -8292,9 +8293,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ee48bee7dae8adb2769c7315adde1780832d05ecb6a77c08cdda53a315992a" +checksum = "19d7dd832b1bbcd0a2b977b2d85986437105fd5e1e82bd4becb2e6a9db112655" dependencies = [ "anyhow", "async-trait", @@ -8309,6 +8310,7 @@ dependencies = [ "pin-project", "prost 0.12.1", "rand 0.8.5", + "semver", "snow", "thiserror", "tls-listener", @@ -8327,9 +8329,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -8349,9 +8351,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -8369,9 +8371,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -9052,13 +9054,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "hex", + "jsonrpsee", "rand 0.8.5", "secrecy", + "semver", "tempfile", "test-casing", "thiserror", "tokio", "tracing", + "zksync_basic_types", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -9073,16 +9079,20 @@ dependencies = [ "zksync_l1_contract_interface", "zksync_merkle_tree", "zksync_metadata_calculator", + "zksync_multivm", "zksync_node_api_server", "zksync_node_genesis", "zksync_node_sync", "zksync_node_test_utils", "zksync_protobuf", + "zksync_state", "zksync_state_keeper", + "zksync_storage", "zksync_system_constants", "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_interface", "zksync_web3_decl", ] @@ -9139,6 +9149,7 @@ dependencies = [ "ctrlc", "futures 0.3.28", "pin-project-lite", + "semver", "thiserror", "tokio", "tracing", @@ -9341,9 +9352,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -9362,9 +9373,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/Cargo.toml b/Cargo.toml index d244d436b9f5..075f5007be4c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -218,16 +218,16 @@ zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. -zksync_concurrency = "=0.1.0-rc.11" -zksync_consensus_bft = "=0.1.0-rc.11" -zksync_consensus_crypto = "=0.1.0-rc.11" -zksync_consensus_executor = "=0.1.0-rc.11" -zksync_consensus_network = "=0.1.0-rc.11" -zksync_consensus_roles = "=0.1.0-rc.11" -zksync_consensus_storage = "=0.1.0-rc.11" -zksync_consensus_utils = "=0.1.0-rc.11" -zksync_protobuf = "=0.1.0-rc.11" -zksync_protobuf_build = "=0.1.0-rc.11" +zksync_concurrency = "=0.1.0-rc.12" +zksync_consensus_bft = "=0.1.0-rc.12" +zksync_consensus_crypto = "=0.1.0-rc.12" +zksync_consensus_executor = "=0.1.0-rc.12" +zksync_consensus_network = "=0.1.0-rc.12" +zksync_consensus_roles = "=0.1.0-rc.12" +zksync_consensus_storage = "=0.1.0-rc.12" +zksync_consensus_utils = "=0.1.0-rc.12" +zksync_protobuf = "=0.1.0-rc.12" +zksync_protobuf_build = "=0.1.0-rc.12" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index c30cc1a432bb..7b94ca7a0c2a 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -242,7 +242,13 @@ impl ExternalNodeBuilder { let config = self.config.consensus.clone(); let secrets = config::read_consensus_secrets().context("config::read_consensus_secrets()")?; - let layer = ExternalNodeConsensusLayer { config, secrets }; + let layer = ExternalNodeConsensusLayer { + build_version: crate::metadata::SERVER_VERSION + .parse() + .context("CRATE_VERSION.parse()")?, + config, + secrets, + }; self.node.add_layer(layer); Ok(self) } diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 50885a6ec6fe..e5e01f880feb 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,7 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; use secrecy::{ExposeSecret as _, Secret}; -use zksync_basic_types::L2ChainId; +use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; /// `zksync_consensus_crypto::TextFmt` representation of `zksync_consensus_roles::validator::PublicKey`. @@ -89,6 +89,8 @@ pub struct GenesisSpec { /// Leader of the committee. Represents /// `zksync_consensus_roles::validator::LeaderSelectionMode::Sticky`. pub leader: ValidatorPublicKey, + /// Address of the registry contract. 
+ pub registry_address: Option, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 028b5e38055f..bc3b6025b15a 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -243,17 +243,17 @@ impl Distribution for EncodeDist { default_upgrade_addr: rng.gen(), diamond_proxy_addr: rng.gen(), validator_timelock_addr: rng.gen(), - l1_erc20_bridge_proxy_addr: rng.gen(), - l2_erc20_bridge_addr: rng.gen(), - l1_shared_bridge_proxy_addr: rng.gen(), - l2_shared_bridge_addr: rng.gen(), - l1_weth_bridge_proxy_addr: rng.gen(), - l2_weth_bridge_addr: rng.gen(), - l2_testnet_paymaster_addr: rng.gen(), + l1_erc20_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_erc20_bridge_addr: self.sample_opt(|| rng.gen()), + l1_shared_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_shared_bridge_addr: self.sample_opt(|| rng.gen()), + l1_weth_bridge_proxy_addr: self.sample_opt(|| rng.gen()), + l2_weth_bridge_addr: self.sample_opt(|| rng.gen()), + l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()), l1_multicall3_addr: rng.gen(), - base_token_addr: rng.gen(), - chain_admin_addr: rng.gen(), ecosystem_contracts: self.sample(rng), + base_token_addr: self.sample_opt(|| rng.gen()), + chain_admin_addr: self.sample_opt(|| rng.gen()), } } } @@ -777,6 +777,7 @@ impl Distribution for EncodeDist { validators: self.sample_collect(rng), attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), + registry_address: self.sample_opt(|| rng.gen()), } } } diff --git a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json b/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json deleted file mode 100644 index 3baa610d7d78..000000000000 --- a/core/lib/dal/.sqlx/query-14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n genesis\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "genesis", - "type_info": "Jsonb" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - true - ] - }, - "hash": "14c0caee921199f799400dbea719ed36420c15081ff5f60da0a1c769c2dbc542" -} diff --git a/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json new file mode 100644 index 000000000000..28a1e54230d8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n genesis,\n global_config\n FROM\n consensus_replica_state\n WHERE\n fake_key\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "genesis", + "type_info": "Jsonb" + }, + { + "ordinal": 1, + "name": "global_config", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + true + ] + }, + "hash": "17c760825deaa18fc8862c950dc38ff77f5a0d5dfcc7c3f1519f882d2fa60634" +} diff --git a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json similarity index 51% rename from core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json rename to 
core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json index 38b88c316eef..3817369ecc16 100644 --- a/core/lib/dal/.sqlx/query-f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975.json +++ b/core/lib/dal/.sqlx/query-1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85.json @@ -1,15 +1,16 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n consensus_replica_state (fake_key, genesis, state)\n VALUES\n (TRUE, $1, $2)\n ", + "query": "\n INSERT INTO\n consensus_replica_state (fake_key, global_config, genesis, state)\n VALUES\n (TRUE, $1, $2, $3)\n ", "describe": { "columns": [], "parameters": { "Left": [ + "Jsonb", "Jsonb", "Jsonb" ] }, "nullable": [] }, - "hash": "f87c50d37f78d6b3c5a752ea88799a1f6ee5a046ece2ef949aee7ab3d2549975" + "hash": "1f43731fa0736a91ba8da41f52b6b22abb03e2a9b2d3b53dc858e5c5a08bfb85" } diff --git a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json similarity index 58% rename from core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json rename to core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json index a42fbe98ff2f..cabe0a3dc557 100644 --- a/core/lib/dal/.sqlx/query-d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510.json +++ b/core/lib/dal/.sqlx/query-311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", + "query": "\n INSERT INTO\n l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ", "describe": { "columns": [], "parameters": { @@ -11,5 +11,5 @@ }, "nullable": [] }, - "hash": "d9d71913a116abf390c71f5229426306b02e328d7b1b69c495443bd2ca7f7510" + "hash": "311d0357c22163b893dc91f2b080f2ede5e22b0bbd8bc910cb36a91ed992bde1" } diff --git a/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json new file mode 100644 index 000000000000..ec17f2e0b61b --- /dev/null +++ b/core/lib/dal/.sqlx/query-65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n attesters\n FROM\n l1_batches_consensus_committees\n WHERE\n l1_batch_number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "attesters", + "type_info": "Jsonb" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "65bbf852d677e1f00a04785374148aa4e4a804519bcf68e14c5bbb0f58939da1" +} diff --git a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json b/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json deleted file mode 100644 index 5130763af73c..000000000000 --- a/core/lib/dal/.sqlx/query-849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batch_number) AS \"number\"\n FROM\n l1_batches_consensus\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - 
"name": "number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [] - }, - "nullable": [ - null - ] - }, - "hash": "849d54b4cf9212010fb4e41ce8137978579ba22eec525912c4aeeb235c3b984c" -} diff --git a/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json new file mode 100644 index 000000000000..a59468bd516c --- /dev/null +++ b/core/lib/dal/.sqlx/query-883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n l1_batch_number\n FROM\n l1_batches_consensus\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false + ] + }, + "hash": "883be3789994eac050df85056e4987e056c2bf423054e40236aba60f4d3b8a97" +} diff --git a/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json new file mode 100644 index 000000000000..356fd8e9d999 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches_consensus_committees (l1_batch_number, attesters, updated_at)\n VALUES\n ($1, $2, NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n l1_batch_number = $1,\n attesters = $2,\n updated_at = NOW()\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "b313ab2b1e0a83136a202ea758c6d2b2e3f2497e6b5f26c72e220397cc0e62f7" +} diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql new file mode 100644 index 000000000000..fee0b42079f3 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE consensus_replica_state DROP COLUMN global_config; + +DROP TABLE l1_batches_consensus_committees; diff --git a/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql new file mode 100644 index 000000000000..c31952b96465 --- /dev/null +++ b/core/lib/dal/migrations/20240829123456_add_l1_batches_consensus_committees.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE consensus_replica_state + ADD COLUMN global_config JSONB NULL; + +CREATE TABLE l1_batches_consensus_committees ( + l1_batch_number BIGINT PRIMARY KEY REFERENCES l1_batches (number) ON DELETE CASCADE, + attesters JSONB NOT NULL, + updated_at TIMESTAMP NOT NULL +); diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 658da6c76821..f0ef336bc543 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -22,6 +22,36 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; use crate::models::{parse_h160, parse_h256}; +/// Global config of the consensus. 
+#[derive(Debug, PartialEq, Clone)] +pub struct GlobalConfig { + pub genesis: validator::Genesis, + pub registry_address: Option<ethabi::Address>, +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result<Self> { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + } + } +} + /// Global attestation status served by /// `attestationStatus` RPC. #[derive(Debug, PartialEq, Clone)] @@ -469,3 +499,24 @@ impl ProtoRepr for proto::Transaction { } } } + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result<Self::Type> { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::<Result<_, _>>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index ea0c12f1b5f3..da9151f10f4d 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package zksync.dal; import "zksync/roles/validator.proto"; +import "zksync/roles/attester.proto"; message Payload { // zksync-era ProtocolVersionId @@ -117,6 +118,15 @@ message PaymasterParams { optional bytes paymaster_input = 2; // required } +message AttesterCommittee { + repeated roles.attester.WeightedAttester members = 1; // required +} + +message GlobalConfig { + optional roles.validator.Genesis genesis = 1; // required + optional bytes registry_address = 2; // optional; H160 +} + message AttestationStatus { optional roles.validator.GenesisHash genesis = 1; // required optional uint64 next_batch_to_attest = 2; // required diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 8f05cb381777..2dca58e2a6a6 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -1,5 +1,4 @@ use anyhow::Context as _; -use bigdecimal::Zero as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BlockStoreState, ReplicaState}; use zksync_db_connection::{ @@ -7,10 +6,10 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoFmt as _; +use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; -pub use crate::consensus::{AttestationStatus, Payload}; +pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; use crate::{Core, CoreDal}; /// Storage access methods for `zksync_core::consensus` module. @@ -33,72 +32,77 @@ pub enum InsertCertificateError { } impl ConsensusDal<'_, '_> { - /// Fetches genesis. - pub async fn genesis(&mut self) -> DalResult<Option<validator::Genesis>> { - Ok(sqlx::query!( + /// Fetch consensus global config. + pub async fn global_config(&mut self) -> anyhow::Result<Option<GlobalConfig>> { + // global_config contains a superset of genesis information. + // genesis column is deprecated and will be removed once the main node + // is fully upgraded. + // For now we keep the information between both columns in sync.
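+ // Reads below therefore prefer the new global_config column and fall back + // to the legacy genesis column, wrapping it as a GlobalConfig with + // registry_address: None.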
+ let Some(row) = sqlx::query!( r#" SELECT - genesis + genesis, + global_config FROM consensus_replica_state WHERE fake_key "# ) - .try_map(|row| { - let Some(genesis) = row.genesis else { - return Ok(None); - }; - // Deserialize the json, but don't allow for unknown fields. - // We might encounter an unknown fields here in case if support for the previous - // consensus protocol version is removed before the migration to a new version - // is performed. The node should NOT operate in such a state. - Ok(Some( - validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis, /*deny_unknown_fields=*/ true, - ) - .decode_column("genesis")?, - ) - .decode_column("genesis")? - .with_hash(), - )) - }) - .instrument("genesis") + .instrument("global_config") .fetch_optional(self.storage) .await? - .flatten()) + else { + return Ok(None); + }; + if let Some(global_config) = row.global_config { + return Ok(Some( + zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, + )); + } + if let Some(genesis) = row.genesis { + let genesis: validator::Genesis = + zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + return Ok(Some(GlobalConfig { + genesis, + registry_address: None, + })); + } + Ok(None) } - /// Attempts to update the genesis. + /// Attempts to update the global config. /// Fails if the new genesis is invalid. /// Fails if the new genesis has different `chain_id`. /// Fails if the storage contains a newer genesis (higher fork number). - /// Noop if the new genesis is the same as the current one. + /// Noop if the new global config is the same as the current one. /// Resets the stored consensus state otherwise and purges all certificates. - pub async fn try_update_genesis(&mut self, genesis: &validator::Genesis) -> anyhow::Result<()> { + pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().genesis().await? { + if let Some(got) = txn.consensus_dal().global_config().await? { // Exit if the genesis didn't change. 
- if &got == genesis { + if &got == want { return Ok(()); } anyhow::ensure!( - got.chain_id == genesis.chain_id, + got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", - got.chain_id, - genesis.chain_id, + got.genesis.chain_id, + want.genesis.chain_id, ); anyhow::ensure!( - got.fork_number < genesis.fork_number, + got.genesis.fork_number < want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", - got.fork_number, - genesis.fork_number, + got.genesis.fork_number, + want.genesis.fork_number, ); - genesis.verify().context("genesis.verify()")?; + want.genesis.verify().context("genesis.verify()")?; } let genesis = - zksync_protobuf::serde::serialize(genesis, serde_json::value::Serializer).unwrap(); + zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = + zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); let state = zksync_protobuf::serde::serialize( &ReplicaState::default(), serde_json::value::Serializer, @@ -131,14 +135,15 @@ impl ConsensusDal<'_, '_> { sqlx::query!( r#" INSERT INTO - consensus_replica_state (fake_key, genesis, state) + consensus_replica_state (fake_key, global_config, genesis, state) VALUES - (TRUE, $1, $2) + (TRUE, $1, $2, $3) "#, + global_config, genesis, state, ) - .instrument("try_update_genesis#INSERT INTO consenuss_replica_state") + .instrument("try_update_global_config#INSERT INTO consensus_replica_state") .execute(&mut txn) .await?; txn.commit().await?; @@ -154,25 +159,33 @@ impl ConsensusDal<'_, '_> { .start_transaction() .await .context("start_transaction")?; - let Some(old) = txn.consensus_dal().genesis().await.context("genesis()")? else { + let Some(old) = txn + .consensus_dal() + .global_config() + .await + .context("global_config()")? + else { return Ok(()); }; - let new = validator::GenesisRaw { - chain_id: old.chain_id, - fork_number: old.fork_number.next(), - first_block: txn - .consensus_dal() - .next_block() - .await - .context("next_block()")?, - - protocol_version: old.protocol_version, - validators: old.validators.clone(), - attesters: old.attesters.clone(), - leader_selection: old.leader_selection.clone(), - } - .with_hash(); - txn.consensus_dal().try_update_genesis(&new).await?; + let new = GlobalConfig { + genesis: validator::GenesisRaw { + chain_id: old.genesis.chain_id, + fork_number: old.genesis.fork_number.next(), + first_block: txn + .consensus_dal() + .next_block() + .await + .context("next_block()")?, + + protocol_version: old.genesis.protocol_version, + validators: old.genesis.validators.clone(), + attesters: old.genesis.attesters.clone(), + leader_selection: old.genesis.leader_selection.clone(), + } + .with_hash(), + registry_address: old.registry_address, + }; + txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; Ok(()) } @@ -259,7 +272,12 @@ impl ConsensusDal<'_, '_> { /// so it might NOT be the certificate for the last L2 block. pub async fn block_certificates_range(&mut self) -> anyhow::Result { // It cannot be older than genesis first block. - let mut start = self.genesis().await?.context("genesis()")?.first_block; + let mut start = self + .global_config() + .await? + .context("genesis()")? + .genesis + .first_block; start = start.max(self.first_block().await.context("first_block()")?); let row = sqlx::query!( r#" @@ -422,21 +440,96 @@ impl ConsensusDal<'_, '_> { Ok(()) } + /// Persist the attester committee for the given batch. 
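+ /// The attestation controllers on the main node and the external node derive + /// this committee (from the registry contract when one is configured) and + /// store it here before attesting, so that `insert_batch_certificate` can + /// verify incoming certificates against it.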
+ pub async fn upsert_attester_committee( + &mut self, + number: attester::BatchNumber, + committee: &attester::Committee, + ) -> anyhow::Result<()> { + let committee = proto::AttesterCommittee::build(committee); + let committee = + zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) + .unwrap(); + sqlx::query!( + r#" + INSERT INTO + l1_batches_consensus_committees (l1_batch_number, attesters, updated_at) + VALUES + ($1, $2, NOW()) + ON CONFLICT (l1_batch_number) DO + UPDATE + SET + l1_batch_number = $1, + attesters = $2, + updated_at = NOW() + "#, + i64::try_from(number.0).context("overflow")?, + committee + ) + .instrument("upsert_attester_committee") + .report_latency() + .execute(self.storage) + .await?; + Ok(()) + } + + /// Fetches the attester committee for the L1 batch with the given number. + pub async fn attester_committee( + &mut self, + n: attester::BatchNumber, + ) -> anyhow::Result> { + let Some(row) = sqlx::query!( + r#" + SELECT + attesters + FROM + l1_batches_consensus_committees + WHERE + l1_batch_number = $1 + "#, + i64::try_from(n.0)? + ) + .instrument("attester_committee") + .report_latency() + .fetch_optional(self.storage) + .await? + else { + return Ok(None); + }; + let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) + .context("deserialize_proto()")?; + Ok(Some( + proto::AttesterCommittee::read(&raw).context("read()")?, + )) + } + /// Inserts a certificate for the L1 batch. /// Noop if a certificate for the same L1 batch is already present. - /// No verification is performed - it cannot be performed due to circular dependency on + /// Verification against previously stored attester committee is performed. + /// Batch hash is not verified - it cannot be performed due to circular dependency on /// `zksync_l1_contract_interface`. pub async fn insert_batch_certificate( &mut self, cert: &attester::BatchQC, ) -> anyhow::Result<()> { - let res = sqlx::query!( + let cfg = self + .global_config() + .await + .context("global_config()")? + .context("genesis is missing")?; + let committee = self + .attester_committee(cert.message.number) + .await + .context("attester_committee()")? + .context("attester committee is missing")?; + cert.verify(cfg.genesis.hash(), &committee) + .context("cert.verify()")?; + sqlx::query!( r#" INSERT INTO - l1_batches_consensus (l1_batch_number, certificate, created_at, updated_at) + l1_batches_consensus (l1_batch_number, certificate, updated_at, created_at) VALUES ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. @@ -446,9 +539,6 @@ impl ConsensusDal<'_, '_> { .report_latency() .execute(self.storage) .await?; - if res.rows_affected().is_zero() { - tracing::debug!(l1_batch_number = ?cert.message.number, "duplicate batch certificate"); - } Ok(()) } @@ -457,24 +547,28 @@ impl ConsensusDal<'_, '_> { pub async fn last_batch_certificate_number( &mut self, ) -> anyhow::Result> { - let row = sqlx::query!( + let Some(row) = sqlx::query!( r#" SELECT - MAX(l1_batch_number) AS "number" + l1_batch_number FROM l1_batches_consensus + ORDER BY + l1_batch_number DESC + LIMIT + 1 "# ) .instrument("last_batch_certificate_number") .report_latency() - .fetch_one(self.storage) - .await?; - - let Some(n) = row.number else { + .fetch_optional(self.storage) + .await? 
+ else { return Ok(None); }; + Ok(Some(attester::BatchNumber( - n.try_into().context("overflow")?, + row.l1_batch_number.try_into().context("overflow")?, ))) } @@ -529,7 +623,7 @@ impl ConsensusDal<'_, '_> { /// This is a main node only query. /// ENs should call the attestation_status RPC of the main node. pub async fn attestation_status(&mut self) -> anyhow::Result> { - let Some(genesis) = self.genesis().await.context("genesis()")? else { + let Some(cfg) = self.global_config().await.context("genesis()")? else { return Ok(None); }; let Some(next_batch_to_attest) = async { @@ -542,18 +636,21 @@ impl ConsensusDal<'_, '_> { return Ok(Some(last + 1)); } // Otherwise start with the batch containing the first block of the fork. - self.batch_of_block(genesis.first_block) + self.batch_of_block(cfg.genesis.first_block) .await .context("batch_of_block()") } .await? else { - tracing::info!(%genesis.first_block, "genesis block not found"); + tracing::info!(%cfg.genesis.first_block, "genesis block not found"); return Ok(None); }; Ok(Some(AttestationStatus { - genesis: genesis.hash(), - next_batch_to_attest, + genesis: cfg.genesis.hash(), + // We never attest batch 0 for technical reasons: + // * it is not supported to read state before batch 0. + // * the registry contract needs to be deployed before we can start operating on it + next_batch_to_attest: next_batch_to_attest.max(attester::BatchNumber(1)), })) } } @@ -563,8 +660,9 @@ mod tests { use rand::Rng as _; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::ReplicaState; - use zksync_types::{L1BatchNumber, ProtocolVersion}; + use zksync_types::ProtocolVersion; + use super::GlobalConfig; use crate::{ tests::{create_l1_batch_header, create_l2_block_header}, ConnectionPool, Core, CoreDal, @@ -575,19 +673,22 @@ mod tests { let rng = &mut rand::thread_rng(); let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().genesis().await.unwrap()); + assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); for n in 0..3 { let setup = validator::testonly::Setup::new(rng, 3); let mut genesis = (*setup.genesis).clone(); genesis.fork_number = validator::ForkNumber(n); - let genesis = genesis.with_hash(); + let cfg = GlobalConfig { + genesis: genesis.with_hash(), + registry_address: Some(rng.gen()), + }; conn.consensus_dal() - .try_update_genesis(&genesis) + .try_update_global_config(&cfg) .await .unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!( ReplicaState::default(), @@ -597,8 +698,8 @@ mod tests { let want: ReplicaState = rng.gen(); conn.consensus_dal().set_replica_state(&want).await.unwrap(); assert_eq!( - genesis, - conn.consensus_dal().genesis().await.unwrap().unwrap() + cfg, + conn.consensus_dal().global_config().await.unwrap().unwrap() ); assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); } @@ -608,14 +709,32 @@ mod tests { #[tokio::test] async fn test_batch_certificate() { let rng = &mut rand::thread_rng(); + let setup = validator::testonly::Setup::new(rng, 3); let pool = ConnectionPool::::test_pool().await; let mut conn = pool.connection().await.unwrap(); + let cfg = GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: Some(rng.gen()), + }; + conn.consensus_dal() + .try_update_global_config(&cfg) + .await + .unwrap(); - let mut mock_batch_qc = |number: L1BatchNumber| { - 
let mut cert: attester::BatchQC = rng.gen(); - cert.message.number.0 = u64::from(number.0); - cert.signatures.add(rng.gen(), rng.gen()); - cert + let mut make_cert = |number: attester::BatchNumber| { + let m = attester::Batch { + genesis: setup.genesis.hash(), + hash: rng.gen(), + number, + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } + attester::BatchQC { + message: m, + signatures: sigs, + } }; // Required for inserting l2 blocks @@ -627,8 +746,7 @@ mod tests { // Insert some mock L2 blocks and L1 batches let mut block_number = 0; let mut batch_number = 0; - let num_batches = 3; - for _ in 0..num_batches { + for _ in 0..3 { for _ in 0..3 { block_number += 1; let l2_block = create_l2_block_header(block_number); @@ -636,64 +754,56 @@ mod tests { } batch_number += 1; let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() .insert_mock_l1_batch(&l1_batch) .await .unwrap(); - conn.blocks_dal() .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) .await .unwrap(); } - let l1_batch_number = L1BatchNumber(batch_number); + let n = attester::BatchNumber(batch_number.into()); // Insert a batch certificate for the last L1 batch. - let cert1 = mock_batch_qc(l1_batch_number); - + let want = make_cert(n); conn.consensus_dal() - .insert_batch_certificate(&cert1) + .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap()) .await .unwrap(); - - // Try insert duplicate batch certificate for the same batch. - let cert2 = mock_batch_qc(l1_batch_number); - conn.consensus_dal() - .insert_batch_certificate(&cert2) + .insert_batch_certificate(&want) .await .unwrap(); + // Reinserting a cert should fail. + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n)) + .await + .is_err()); + // Retrieve the latest certificate. - let number = conn + let got_n = conn .consensus_dal() .last_batch_certificate_number() .await .unwrap() .unwrap(); - - let cert = conn + let got = conn .consensus_dal() - .batch_certificate(number) + .batch_certificate(got_n) .await .unwrap() .unwrap(); - - assert_eq!(cert, cert1, "duplicates are ignored"); + assert_eq!(got, want); // Try insert batch certificate for non-existing batch - let cert3 = mock_batch_qc(l1_batch_number.next()); - conn.consensus_dal() - .insert_batch_certificate(&cert3) - .await - .expect_err("missing payload"); - - // Insert one more L1 batch without a certificate. 
- conn.blocks_dal() - .insert_mock_l1_batch(&create_l1_batch_header(batch_number + 1)) + assert!(conn + .consensus_dal() + .insert_batch_certificate(&make_cert(n.next())) .await - .unwrap(); + .is_err()); } } diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index 3365f56add77..298c43b80ccd 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -89,6 +89,7 @@ CONTRACTS_L2_ERC20_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L1_WETH_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_WETH_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_TESTNET_PAYMASTER_ADDR="FC073319977e314F251EAE6ae6bE76B0B3BAeeCF" +CONTRACTS_L2_CONSENSUS_REGISTRY_ADDR="D64e136566a9E04eb05B30184fF577F52682D182" CONTRACTS_L1_MULTICALL3_ADDR="0xcA11bde05977b3631167028862bE2a173976CA11" CONTRACTS_L1_SHARED_BRIDGE_PROXY_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" CONTRACTS_L2_SHARED_BRIDGE_ADDR="0x8656770FA78c830456B00B4fFCeE6b1De0e1b888" diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index b57f033d0d22..f5eb5c5b2f10 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -6,7 +6,7 @@ use zksync_config::configs::consensus::{ }; use zksync_protobuf::{kB, read_optional, repr::ProtoRepr, required, ProtoFmt}; -use crate::{proto::consensus as proto, read_optional_repr}; +use crate::{parse_h160, proto::consensus as proto, read_optional_repr}; impl ProtoRepr for proto::WeightedValidator { type Type = WeightedValidator; @@ -65,6 +65,12 @@ impl ProtoRepr for proto::GenesisSpec { .collect::>() .context("attesters")?, leader: ValidatorPublicKey(required(&self.leader).context("leader")?.clone()), + registry_address: self + .registry_address + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("registry_address")?, }) } fn build(this: &Self::Type) -> Self { @@ -74,6 +80,7 @@ impl ProtoRepr for proto::GenesisSpec { validators: this.validators.iter().map(ProtoRepr::build).collect(), attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), + registry_address: this.registry_address.map(|a| format!("{:?}", a)), } } } diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index c64c993be7c8..835ead1ab65c 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -56,6 +56,8 @@ message GenesisSpec { repeated WeightedValidator validators = 3; // must be non-empty; validator committee. optional string leader = 4; // required; ValidatorPublicKey repeated WeightedAttester attesters = 5; // can be empty; attester committee. + // Currently not in consensus genesis, but still a part of the global configuration. + optional string registry_address = 6; // optional; H160 } // Per peer connection RPC rate limits. diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index bf26caddd07b..9391c8627573 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -44,10 +44,23 @@ pub struct SyncBlock { pub protocol_version: ProtocolVersionId, } +/// Global configuration of the consensus served by the main node to the external nodes. +/// In particular, it contains consensus genesis. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::GlobalConfig`. 
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ConsensusGlobalConfig(pub serde_json::Value); + +/// [DEPRECATED] Genesis served by the main node to the external nodes. +/// This type is deprecated since ConsensusGlobalConfig also contains genesis and is extensible. +/// +/// The wrapped JSON value corresponds to `zksync_consensus_roles::validator::Genesis`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ConsensusGenesis(pub serde_json::Value); /// AttestationStatus maintained by the main node. /// Used for testing L1 batch signing by consensus attesters. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); diff --git a/core/lib/web3_decl/src/error.rs b/core/lib/web3_decl/src/error.rs index f42fe8de59d5..3aa16a9ab77c 100644 --- a/core/lib/web3_decl/src/error.rs +++ b/core/lib/web3_decl/src/error.rs @@ -60,6 +60,19 @@ pub struct EnrichedClientError { args: HashMap<&'static str, String>, } +/// Whether the error should be considered retriable. +pub fn is_retriable(err: &ClientError) -> bool { + match err { + ClientError::Transport(_) | ClientError::RequestTimeout => true, + ClientError::Call(err) => { + // At least some RPC providers use "internal error" in case of the server being overloaded + err.code() == ErrorCode::ServerIsBusy.code() + || err.code() == ErrorCode::InternalError.code() + } + _ => false, + } +} + /// Alias for a result with enriched client RPC error. pub type EnrichedClientResult = Result; @@ -87,15 +100,7 @@ impl EnrichedClientError { /// Whether the error should be considered retriable. pub fn is_retriable(&self) -> bool { - match self.as_ref() { - ClientError::Transport(_) | ClientError::RequestTimeout => true, - ClientError::Call(err) => { - // At least some RPC providers use "internal error" in case of the server being overloaded - err.code() == ErrorCode::ServerIsBusy.code() - || err.code() == ErrorCode::InternalError.code() - } - _ => false, - } + is_retriable(&self.inner_error) } } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index dac774dd7bdf..8a4d2db8c6fe 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -25,6 +25,9 @@ pub trait EnNamespace { #[method(name = "consensusGenesis")] async fn consensus_genesis(&self) -> RpcResult>; + #[method(name = "consensusGlobalConfig")] + async fn consensus_global_config(&self) -> RpcResult>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records. diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index 086a75c81de9..f247313db2b1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -25,7 +25,7 @@ use super::{ /// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these /// are also provided to an executor. #[derive(Debug)] -pub(crate) struct TxExecutionArgs { +pub struct TxExecutionArgs { /// Transaction / call itself. pub transaction: Transaction, /// Nonce override for the initiator account. @@ -80,7 +80,7 @@ impl TxExecutionArgs { } #[derive(Debug, Clone)] -pub(crate) struct TransactionExecutionOutput { +pub struct TransactionExecutionOutput { /// Output of the VM. 
pub vm: VmExecutionResultAndLogs, /// Execution metrics. @@ -91,7 +91,7 @@ pub(crate) struct TransactionExecutionOutput { /// Executor of transactions. #[derive(Debug)] -pub(crate) enum TransactionExecutor { +pub enum TransactionExecutor { Real(MainOneshotExecutor), #[doc(hidden)] // Intended for tests only Mock(MockOneshotExecutor), diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index f2a3f0e5f8c3..faaccf03c96a 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -16,10 +16,10 @@ use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, }; +pub use self::execute::{TransactionExecutor, TxExecutionArgs}; use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - execute::{TransactionExecutor, TxExecutionArgs}, tracers::ApiTracer, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, @@ -158,7 +158,7 @@ async fn get_pending_state( /// Arguments for VM execution necessary to set up storage and environment. #[derive(Debug, Clone)] -pub(crate) struct TxSetupArgs { +pub struct TxSetupArgs { pub execution_mode: TxExecutionMode, pub operator_account: AccountTreeId, pub fee_input: BatchFeeInput, @@ -215,7 +215,7 @@ impl BlockStartInfoInner { /// Information about first L1 batch / L2 block in the node storage. #[derive(Debug, Clone)] -pub(crate) struct BlockStartInfo { +pub struct BlockStartInfo { cached_pruning_info: Arc>, max_cache_age: Duration, } @@ -331,7 +331,7 @@ impl BlockStartInfo { } #[derive(Debug, thiserror::Error)] -pub(crate) enum BlockArgsError { +pub enum BlockArgsError { #[error("Block is pruned; first retained block is {0}")] Pruned(L2BlockNumber), #[error("Block is missing, but can appear in the future")] @@ -342,7 +342,7 @@ pub(crate) enum BlockArgsError { /// Information about a block provided to VM. #[derive(Debug, Clone, Copy)] -pub(crate) struct BlockArgs { +pub struct BlockArgs { block_id: api::BlockId, resolved_block_number: L2BlockNumber, l1_batch_timestamp_s: Option, diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs index 31384b7a0898..6fdc3dbc7b62 100644 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ b/core/node/api_server/src/execution_sandbox/tracers.rs @@ -11,7 +11,7 @@ use zksync_types::ProtocolVersionId; /// Custom tracers supported by the API sandbox. 
#[derive(Debug)] -pub(crate) enum ApiTracer { +pub enum ApiTracer { CallTracer(Arc>>), Validation { params: ValidationTracerParams, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 5f913e305cd0..f0d96118638b 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -140,6 +140,38 @@ impl MultiVMBaseSystemContracts { } } } + + pub fn load_estimate_gas_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), + post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), + post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), + vm_1_5_0_increased_memory: + BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + } + } + + pub fn load_eth_call_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::playground_post_boojum(), + post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), + post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), + vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( + ), + } + } } /// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and @@ -169,32 +201,8 @@ impl ApiContracts { /// Blocking version of [`Self::load_from_disk()`]. 
pub fn load_from_disk_blocking() -> Self { Self { - estimate_gas: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), - post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), - post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), - }, - eth_call: MultiVMBaseSystemContracts { - pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::playground_post_boojum(), - post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), - post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::playground_post_1_5_0_increased_memory(), - }, + estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(), + eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(), } } } @@ -1003,7 +1011,7 @@ impl TxSender { .await } - pub(super) async fn eth_call( + pub async fn eth_call( &self, block_args: BlockArgs, call_overrides: CallOverrides, diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index c3e116d39928..de7635263735 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -19,6 +19,12 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn consensus_global_config(&self) -> RpcResult> { + self.consensus_global_config_impl() + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn consensus_genesis(&self) -> RpcResult> { self.consensus_genesis_impl() .await diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index ca15352fd1ac..26f4aa2b0b5f 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -21,18 +21,35 @@ impl EnNamespace { Self { state } } + pub async fn consensus_global_config_impl( + &self, + ) -> Result, Web3Error> { + let mut conn = self.state.acquire_connection().await?; + let Some(cfg) = conn + .consensus_dal() + .global_config() + .await + .context("global_config()")? 
+ else { + return Ok(None); + }; + Ok(Some(en::ConsensusGlobalConfig( + zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(), + ))) + } + pub async fn consensus_genesis_impl(&self) -> Result, Web3Error> { let mut conn = self.state.acquire_connection().await?; - let Some(genesis) = conn + let Some(cfg) = conn .consensus_dal() - .genesis() + .global_config() .await - .map_err(DalError::generalize)? + .context("global_config()")? else { return Ok(None); }; Ok(Some(en::ConsensusGenesis( - zksync_protobuf::serde::serialize(&genesis, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(), ))) } @@ -40,7 +57,7 @@ impl EnNamespace { pub async fn attestation_status_impl( &self, ) -> Result, Web3Error> { - let status = self + let Some(status) = self .state .acquire_connection() .await? @@ -54,13 +71,13 @@ impl EnNamespace { .context("TransactionBuilder::build()")? .consensus_dal() .attestation_status() - .await?; - - Ok(status.map(|s| { - en::AttestationStatus( - zksync_protobuf::serde::serialize(&s, serde_json::value::Serializer).unwrap(), - ) - })) + .await? + else { + return Ok(None); + }; + Ok(Some(en::AttestationStatus( + zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(), + ))) } pub(crate) fn current_method(&self) -> &MethodTracer { diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index e82969dae6c6..ba52892584d2 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -11,6 +11,7 @@ keywords.workspace = true categories.workspace = true [dependencies] +zksync_basic_types.workspace = true zksync_config.workspace = true zksync_concurrency.workspace = true zksync_consensus_crypto.workspace = true @@ -20,6 +21,7 @@ zksync_consensus_storage.workspace = true zksync_consensus_executor.workspace = true zksync_consensus_bft.workspace = true zksync_consensus_utils.workspace = true +zksync_contracts.workspace = true zksync_protobuf.workspace = true zksync_dal.workspace = true zksync_l1_contract_interface.workspace = true @@ -31,22 +33,27 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true - +zksync_node_api_server.workspace = true +zksync_state.workspace = true +zksync_storage.workspace = true +zksync_vm_interface.workspace = true +zksync_multivm.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true tempfile.workspace = true thiserror.workspace = true tracing.workspace = true +hex.workspace = true tokio.workspace = true +jsonrpsee.workspace = true +semver.workspace = true [dev-dependencies] zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true zksync_node_api_server.workspace = true zksync_test_account.workspace = true -zksync_contracts.workspace = true -tokio.workspace = true test-casing.workspace = true rand.workspace = true diff --git a/core/node/consensus/src/abi.rs b/core/node/consensus/src/abi.rs new file mode 100644 index 000000000000..0e2200e28038 --- /dev/null +++ b/core/node/consensus/src/abi.rs @@ -0,0 +1,133 @@ +//! Strongly-typed API for Consensus-related solidity contracts. +//! Placeholder until we can depend on alloy_sol_types. +use anyhow::Context as _; +use zksync_types::{ethabi, ethabi::Token}; + +/// Strongly typed representation of a contract function. +/// It also represents the inputs of the function. 
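+/// Implementations pair an input struct with its contract type and output +/// decoding (see `GetAttesterCommittee` in `registry/abi.rs` below), so that +/// calls are type-checked end to end.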
+pub trait Function { + /// Name of the solidity function. + const NAME: &'static str; + /// Type representing the contract this function belongs to. + type Contract: AsRef<ethabi::Contract>; + /// Type representing the outputs of this function. + type Outputs; + /// Encodes this struct to inputs of this function. + fn encode(&self) -> Vec<Token>; + /// Decodes outputs of this function. + fn decode_outputs(outputs: Vec<Token>) -> anyhow::Result<Self::Outputs>; +} + +/// Address of contract `C`. A wrapper around `ethabi::Address` that +/// additionally indicates which contract is deployed at this address. +#[derive(Debug)] +pub struct Address<C>(ethabi::Address, std::marker::PhantomData<C>); + +impl<C> Clone for Address<C> { + fn clone(&self) -> Self { + *self + } +} + +impl<C> Copy for Address<C> {} + +impl<C> PartialEq for Address<C> { + fn eq(&self, other: &Self) -> bool { + self.0.eq(&other.0) + } +} + +impl<C> Eq for Address<C> {} + +impl<C> Address<C> { + pub fn new(address: ethabi::Address) -> Self { + Self(address, std::marker::PhantomData) + } +} + +impl<C> std::ops::Deref for Address<C> { + type Target = ethabi::Address; + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +/// Represents a call to the function `F`. +#[derive(Debug)] +pub struct Call<F: Function> { + /// Contract of the function. + pub contract: F::Contract, + /// Inputs to the function. + pub inputs: F, +} + +impl<F: Function> Call<F> { + pub(super) fn function(&self) -> &ethabi::Function { + self.contract.as_ref().function(F::NAME).unwrap() + } + /// Converts the call to raw calldata. + pub fn calldata(&self) -> ethabi::Result<Vec<u8>> { + self.function().encode_input(&self.inputs.encode()) + } + /// Parses the outputs of the call. + pub fn decode_outputs(&self, outputs: &[u8]) -> anyhow::Result<F::Outputs> { + F::decode_outputs( + self.function() + .decode_output(outputs) + .context("decode_output()")?, + ) + } +} + +pub(crate) fn into_fixed_bytes<const N: usize>(t: Token) -> anyhow::Result<[u8; N]> { + match t { + Token::FixedBytes(b) => b.try_into().ok().context("bad size"), + bad => anyhow::bail!("want fixed_bytes, got {bad:?}"), + } +} + +pub(crate) fn into_tuple<const N: usize>(t: Token) -> anyhow::Result<[Token; N]> { + match t { + Token::Tuple(ts) => ts.try_into().ok().context("bad size"), + bad => anyhow::bail!("want tuple, got {bad:?}"), + } +} + +pub(crate) fn into_uint<I: TryFrom<ethabi::Uint>>(t: Token) -> anyhow::Result<I> { + match t { + Token::Uint(i) => i.try_into().ok().context("overflow"), + bad => anyhow::bail!("want uint, got {bad:?}"), + } +} + +#[cfg(test)] +fn example(t: &ethabi::ParamType) -> Token { + use ethabi::ParamType as T; + match t { + T::Address => Token::Address(ethabi::Address::default()), + T::Bytes => Token::Bytes(ethabi::Bytes::default()), + T::Int(_) => Token::Int(ethabi::Int::default()), + T::Uint(_) => Token::Uint(ethabi::Uint::default()), + T::Bool => Token::Bool(bool::default()), + T::String => Token::String(String::default()), + T::Array(t) => Token::Array(vec![example(t)]), + T::FixedBytes(n) => Token::FixedBytes(vec![0; *n]), + T::FixedArray(t, n) => Token::FixedArray(vec![example(t); *n]), + T::Tuple(ts) => Token::Tuple(ts.iter().map(example).collect()), + } +} + +#[cfg(test)] +impl<F: Function> Call<F> { + pub(crate) fn test(&self) -> anyhow::Result<()> { + self.calldata().context("calldata()")?; + F::decode_outputs( + self.function() + .outputs + .iter() + .map(|p| example(&p.kind)) + .collect(), + )?; + Ok(()) + } +} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index c2fa13472066..22f8fc01192f 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -11,6 +11,8 @@ use zksync_config::{ use
zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_executor as executor; use zksync_consensus_roles::{attester, node, validator}; +use zksync_dal::consensus_dal; +use zksync_types::ethabi; fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> { text.map(|text| Text::new(text.expose_secret()).decode()) @@ -41,16 +43,18 @@ pub(super) struct GenesisSpec { pub(super) validators: validator::Committee, pub(super) attesters: Option<attester::Committee>, pub(super) leader_selection: validator::LeaderSelectionMode, + pub(super) registry_address: Option<ethabi::Address>, } impl GenesisSpec { - pub(super) fn from_genesis(g: &validator::Genesis) -> Self { + pub(super) fn from_global_config(cfg: &consensus_dal::GlobalConfig) -> Self { Self { - chain_id: g.chain_id, - protocol_version: g.protocol_version, - validators: g.validators.clone(), - attesters: g.attesters.clone(), - leader_selection: g.leader_selection.clone(), + chain_id: cfg.genesis.chain_id, + protocol_version: cfg.genesis.protocol_version, + validators: cfg.genesis.validators.clone(), + attesters: cfg.genesis.attesters.clone(), + leader_selection: cfg.genesis.leader_selection.clone(), + registry_address: cfg.registry_address, } } @@ -93,6 +97,7 @@ impl GenesisSpec { } else { Some(attester::Committee::new(attesters).context("attesters")?) }, + registry_address: x.registry_address, }) } } @@ -104,6 +109,7 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<node::SecretKey>> { pub(super) fn executor( cfg: &ConsensusConfig, secrets: &ConsensusSecrets, + build_version: Option<semver::Version>, ) -> anyhow::Result<executor::Config> { let mut gossip_static_outbound = HashMap::new(); { @@ -128,6 +134,7 @@ pub(super) fn executor( }; Ok(executor::Config { + build_version, server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index 259cac5d074a..e1f10b8e4e50 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,20 +1,25 @@ use std::sync::Arc; use anyhow::Context as _; +use jsonrpsee::{core::ClientError, types::error::ErrorCode}; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; use zksync_dal::consensus_dal; -use zksync_node_sync::{ - fetcher::FetchedBlock, sync_action::ActionQueueSender, MainNodeClient, SyncState, -}; -use zksync_protobuf::ProtoFmt as _; +use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; -use zksync_web3_decl::client::{DynClient, L2}; +use zksync_web3_decl::{ + client::{DynClient, L2}, + error::is_retriable, + namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, +}; use super::{config, storage::Store, ConsensusConfig, ConsensusSecrets}; -use crate::storage::{self, ConnectionPool}; +use crate::{ + registry, + storage::{self, ConnectionPool}, +}; /// External node. pub(super) struct EN { @@ -27,7 +32,7 @@ impl EN { /// Task running a consensus node for the external node. /// It may be a validator, but it cannot be a leader (cannot propose blocks). /// - /// NOTE: Before starting the consensus node if fetches all the blocks + /// NOTE: Before starting the consensus node it fetches all the blocks /// older than consensus genesis from the main node using json RPC.
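+ /// A background task also watches the main node's global config and aborts + /// the run if it changes, which indicates a hard fork.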
pub async fn run( self, @@ -35,6 +40,7 @@ impl EN { actions: ActionQueueSender, cfg: ConsensusConfig, secrets: ConsensusSecrets, + build_version: Option, ) -> anyhow::Result<()> { let attester = config::attester_key(&secrets).context("attester_key")?; @@ -47,13 +53,16 @@ impl EN { // Update sync state in the background. s.spawn_bg(self.fetch_state_loop(ctx)); - // Initialize genesis. - let genesis = self.fetch_genesis(ctx).await.wrap("fetch_genesis()")?; + // Initialize global config. + let global_config = self + .fetch_global_config(ctx) + .await + .wrap("fetch_genesis()")?; let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &genesis) + conn.try_update_global_config(ctx, &global_config) .await - .wrap("set_genesis()")?; + .wrap("try_update_global_config()")?; let mut payload_queue = conn .new_payload_queue(ctx, actions, self.sync_state.clone()) @@ -63,18 +72,22 @@ impl EN { drop(conn); // Fetch blocks before the genesis. - self.fetch_blocks(ctx, &mut payload_queue, Some(genesis.first_block)) - .await - .wrap("fetch_blocks()")?; + self.fetch_blocks( + ctx, + &mut payload_queue, + Some(global_config.genesis.first_block), + ) + .await + .wrap("fetch_blocks()")?; // Monitor the genesis of the main node. // If it changes, it means that a hard fork occurred and we need to reset the consensus state. s.spawn_bg::<()>({ - let old = genesis.clone(); + let old = global_config.clone(); async { let old = old; loop { - if let Ok(new) = self.fetch_genesis(ctx).await { + if let Ok(new) = self.fetch_global_config(ctx).await { if new != old { return Err(anyhow::format_err!( "genesis changed: old {old:?}, new {new:?}" @@ -105,10 +118,14 @@ impl EN { s.spawn_bg(async { Ok(runner.run(ctx).await?) }); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(self.run_attestation_updater(ctx, genesis.clone(), attestation.clone())); + s.spawn_bg(self.run_attestation_controller( + ctx, + global_config.clone(), + attestation.clone(), + )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -164,24 +181,21 @@ impl EN { /// Monitors the `AttestationStatus` on the main node, /// and updates the attestation config accordingly. 
- async fn run_attestation_updater( + async fn run_attestation_controller( &self, ctx: &ctx::Ctx, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc<attestation::Controller>, ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); + let registry = registry::Registry::new(cfg.genesis.clone(), self.pool.clone()).await; let mut next = attester::BatchNumber(0); loop { let status = loop { match self.fetch_attestation_status(ctx).await { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { - if status.genesis != genesis.hash() { + if status.genesis != cfg.genesis.hash() { return Err(anyhow::format_err!("genesis mismatch").into()); } if status.next_batch_to_attest >= next { break status; } } } ctx.sleep(POLL_INTERVAL).await?; }; + next = status.next_batch_to_attest.next(); tracing::info!( "waiting for hash of batch {:?}", status.next_batch_to_attest ); let hash = self .pool .wait_for_batch_hash(ctx, status.next_batch_to_attest) .await?; + let Some(committee) = registry + .attester_committee_for( + ctx, + cfg.registry_address.map(registry::Address::new), + status.next_batch_to_attest, + ) + .await + .wrap("attester_committee_for()")? + else { + tracing::info!("attestation not required"); + continue; + }; + let committee = Arc::new(committee); + // Persist the derived committee. + self.pool + .connection(ctx) + .await + .wrap("connection")? + .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee) + .await + .wrap("upsert_attester_committee()")?; tracing::info!( "attesting batch {:?} with hash {hash:?}", status.next_batch_to_attest ); @@ -214,7 +250,6 @@ impl EN { })) .await .context("start_attestation()")?; - next = status.next_batch_to_attest.next(); } } @@ -224,37 +259,52 @@ impl EN { const DELAY_INTERVAL: time::Duration = time::Duration::milliseconds(500); const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - match ctx.wait(self.client.fetch_l2_block_number()).await? { + match ctx.wait(self.client.get_block_number()).await? { Ok(head) => { + let head = L2BlockNumber(head.try_into().ok().context("overflow")?); self.sync_state.set_main_node_block(head); ctx.sleep(DELAY_INTERVAL).await?; } Err(err) => { - tracing::warn!("main_node_client.fetch_l2_block_number(): {err}"); + tracing::warn!("get_block_number(): {err}"); ctx.sleep(RETRY_INTERVAL).await?; } } } } - /// Fetches genesis from the main node. + /// Fetches consensus global configuration from the main node. #[tracing::instrument(skip_all)] - async fn fetch_genesis(&self, ctx: &ctx::Ctx) -> ctx::Result<validator::Genesis> { - let genesis = ctx - .wait(self.client.fetch_consensus_genesis()) - .await? - .context("fetch_consensus_genesis()")? - .context("main node is not running consensus component")?; - // Deserialize the json, but don't allow for unknown fields. - // We need to compute the hash of the Genesis, so simply ignoring the unknown fields won't - // do. - Ok(validator::GenesisRaw::read( - &zksync_protobuf::serde::deserialize_proto_with_options( - &genesis.0, /*deny_unknown_fields=*/ true, - ) - .context("deserialize")?, - )? - .with_hash()) + async fn fetch_global_config( + &self, + ctx: &ctx::Ctx, + ) -> ctx::Result<consensus_dal::GlobalConfig> { + match ctx.wait(self.client.consensus_global_config()).await?
{ + Ok(cfg) => { + let cfg = cfg.context("main node is not running consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?) + } + Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => { + tracing::info!( + "consensus_global_config() not found, calling consensus_genesis() instead" + ); + let genesis = ctx + .wait(self.client.consensus_genesis()) + .await? + .context("consensus_genesis()")? + .context("main node is not running consensus component")?; + Ok(consensus_dal::GlobalConfig { + genesis: zksync_protobuf::serde::deserialize(&genesis.0) + .context("deserialize()")?, + registry_address: None, + }) + } + Err(err) => { + return Err(err) + .context("consensus_global_config()") + .map_err(|err| err.into()) + } + } } #[tracing::instrument(skip_all)] @@ -262,15 +312,12 @@ impl EN { &self, ctx: &ctx::Ctx, ) -> ctx::Result<consensus_dal::AttestationStatus> { - match ctx.wait(self.client.fetch_attestation_status()).await? { - Ok(Some(status)) => Ok(zksync_protobuf::serde::deserialize(&status.0) - .context("deserialize(AttestationStatus")?), - Ok(None) => Err(anyhow::format_err!("empty response").into()), - Err(err) => Err(anyhow::format_err!( - "AttestationStatus call to main node HTTP RPC failed: {err:#}" - ) - .into()), - } + let status = ctx + .wait(self.client.attestation_status()) + .await? + .context("attestation_status()")? + .context("main node is not running consensus component")?; + Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?) } /// Fetches (with retries) the given block from the main node. @@ -278,14 +325,11 @@ impl EN { const RETRY_INTERVAL: time::Duration = time::Duration::seconds(5); loop { - let res = ctx.wait(self.client.fetch_l2_block(n, true)).await?; - match res { + match ctx.wait(self.client.sync_l2_block(n, true)).await? { Ok(Some(block)) => return Ok(block.try_into()?), Ok(None) => {} - Err(err) if err.is_retriable() => {} - Err(err) => { - return Err(anyhow::format_err!("client.fetch_l2_block({}): {err}", n).into()); - } + Err(err) if is_retriable(&err) => {} + Err(err) => Err(err).with_context(|| format!("client.sync_l2_block({n})"))?, } ctx.sleep(RETRY_INTERVAL).await?; } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 574e496f4d11..3150f839680e 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -45,6 +45,7 @@ pub async fn run_external_node( sync_state: SyncState, main_node_client: Box<DynClient<L2>>, actions: ActionQueueSender, + build_version: semver::Version, ) -> anyhow::Result<()> { let en = en::EN { pool: ConnectionPool(pool), @@ -58,7 +59,8 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets).await + en.run(ctx, actions, cfg, secrets, Some(build_version)) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index 13d918b5b6ee..ff9cdf865281 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -5,6 +5,7 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; +mod abi; // Currently `batch` module is only used in tests, // but will be used in production once batch syncing is implemented in consensus.
#[allow(unused)] @@ -13,8 +14,10 @@ mod config; mod en; pub mod era; mod mn; +mod registry; mod storage; #[cfg(test)] pub(crate) mod testonly; #[cfg(test)] mod tests; +mod vm; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 7de86b4d8ba1..4d428346ebe4 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -6,9 +6,10 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_dal::consensus_dal; use crate::{ - config, + config, registry, storage::{ConnectionPool, InsertCertificateError, Store}, }; @@ -36,9 +37,9 @@ pub async fn run_main_node( pool.connection(ctx) .await .wrap("connection()")? - .adjust_genesis(ctx, &spec) + .adjust_global_config(ctx, &spec) .await - .wrap("adjust_genesis()")?; + .wrap("adjust_global_config()")?; } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. @@ -47,33 +48,40 @@ pub async fn run_main_node( .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); - let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + let global_config = pool + .connection(ctx) .await - .wrap("BlockStore::new()")?; - s.spawn_bg(runner.run(ctx)); - - let genesis = block_store.genesis().clone(); + .wrap("connection()")? + .global_config(ctx) + .await + .wrap("global_config()")? + .context("global_config() disappeared")?; anyhow::ensure!( - genesis.leader_selection + global_config.genesis.leader_selection == validator::LeaderSelectionMode::Sticky(validator_key.public()), "unsupported leader selection mode - main node has to be the leader" ); + let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())) + .await + .wrap("BlockStore::new()")?; + s.spawn_bg(runner.run(ctx)); + let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) .await .wrap("BatchStore::new()")?; s.spawn_bg(runner.run(ctx)); let attestation = Arc::new(attestation::Controller::new(attester)); - s.spawn_bg(run_attestation_updater( + s.spawn_bg(run_attestation_controller( ctx, &pool, - genesis, + global_config, attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets)?, + config: config::executor(&cfg, &secrets, None)?, block_store, batch_store, validator: Some(executor::Validator { @@ -93,18 +101,17 @@ pub async fn run_main_node( /// Manages attestation state by configuring the /// next batch to attest and storing the collected /// certificates. -async fn run_attestation_updater( +async fn run_attestation_controller( ctx: &ctx::Ctx, pool: &ConnectionPool, - genesis: validator::Genesis, + cfg: consensus_dal::GlobalConfig, attestation: Arc, ) -> anyhow::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(5); + let registry = registry::Registry::new(cfg.genesis, pool.clone()).await; + let registry_addr = cfg.registry_address.map(registry::Address::new); + let mut next = attester::BatchNumber(0); let res = async { - let Some(committee) = &genesis.attesters else { - return Ok(()); - }; - let committee = Arc::new(committee.clone()); loop { // After regenesis it might happen that the batch number for the first block // is not immediately known (the first block was not produced yet), @@ -118,10 +125,12 @@ async fn run_attestation_updater( .await .wrap("attestation_status()")? 
{
-                Some(status) => break status,
-                None => ctx.sleep(POLL_INTERVAL).await?,
+                Some(status) if status.next_batch_to_attest >= next => break status,
+                _ => {}
             }
+            ctx.sleep(POLL_INTERVAL).await?;
         };
+        next = status.next_batch_to_attest.next();
         tracing::info!(
             "waiting for hash of batch {:?}",
             status.next_batch_to_attest
@@ -129,6 +138,22 @@ async fn run_attestation_updater(
         let hash = pool
             .wait_for_batch_hash(ctx, status.next_batch_to_attest)
             .await?;
+        let Some(committee) = registry
+            .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest)
+            .await
+            .wrap("attester_committee_for()")?
+        else {
+            tracing::info!("attestation not required");
+            continue;
+        };
+        let committee = Arc::new(committee);
+        // Persist the derived committee.
+        pool.connection(ctx)
+            .await
+            .wrap("connection")?
+            .upsert_attester_committee(ctx, status.next_batch_to_attest, &committee)
+            .await
+            .wrap("upsert_attester_committee()")?;
         tracing::info!(
             "attesting batch {:?} with hash {hash:?}",
             status.next_batch_to_attest
@@ -140,7 +165,7 @@ async fn run_attestation_updater(
                     number: status.next_batch_to_attest,
                     genesis: status.genesis,
                 },
-                committee: committee.clone(),
+                committee,
             }))
             .await
             .context("start_attestation()")?;
diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs
new file mode 100644
index 000000000000..55cc7f9264fb
--- /dev/null
+++ b/core/node/consensus/src/registry/abi.rs
@@ -0,0 +1,225 @@
+//! Strongly-typed API for the ConsensusRegistry contract.
+#![allow(dead_code)]
+
+use std::sync::Arc;
+
+use anyhow::Context as _;
+use zksync_types::{ethabi, ethabi::Token};
+
+use crate::abi;
+
+/// Represents the ConsensusRegistry contract.
+#[derive(Debug, Clone)]
+pub(crate) struct ConsensusRegistry(Arc<ethabi::Contract>);
+
+impl AsRef<ethabi::Contract> for ConsensusRegistry {
+    fn as_ref(&self) -> &ethabi::Contract {
+        &self.0
+    }
+}
+
+impl ConsensusRegistry {
+    const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json";
+
+    /// Loads the bytecode of the contract.
+    #[cfg(test)]
+    pub(crate) fn bytecode() -> Vec<u8> {
+        zksync_contracts::read_bytecode(Self::FILE)
+    }
+
+    /// Loads the `ethabi` representation of the contract.
+    pub(crate) fn load() -> Self {
+        Self(zksync_contracts::load_contract(ConsensusRegistry::FILE).into())
+    }
+
+    /// Constructs a call to function `F` of this contract.
+    pub(crate) fn call<F: abi::Function<Contract = Self>>(&self, inputs: F) -> abi::Call<F> {
+        abi::Call {
+            contract: self.clone(),
+            inputs,
+        }
+    }
+}
+
+/// ConsensusRegistry.getAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct GetAttesterCommittee;
+
+impl abi::Function for GetAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "getAttesterCommittee";
+
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+
+    type Outputs = Vec<Attester>;
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<Self::Outputs> {
+        let [attesters] = tokens.try_into().ok().context("bad size")?;
+        let mut res = vec![];
+        for token in attesters.into_array().context("not array")? {
+            res.push(Attester::from_token(token).context("attesters")?);
+        }
+        Ok(res)
+    }
+}
+
+/// ConsensusRegistry.add function.
+#[derive(Debug, Default)]
+pub(crate) struct Add {
+    pub(crate) node_owner: ethabi::Address,
+    pub(crate) validator_weight: u32,
+    pub(crate) validator_pub_key: BLS12_381PublicKey,
+    pub(crate) validator_pop: BLS12_381Signature,
+    pub(crate) attester_weight: u32,
+    pub(crate) attester_pub_key: Secp256k1PublicKey,
+}
+
+impl abi::Function for Add {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "add";
+    fn encode(&self) -> Vec<Token> {
+        vec![
+            Token::Address(self.node_owner),
+            Token::Uint(self.validator_weight.into()),
+            self.validator_pub_key.to_token(),
+            self.validator_pop.to_token(),
+            Token::Uint(self.attester_weight.into()),
+            self.attester_pub_key.to_token(),
+        ]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.initialize function.
+#[derive(Debug, Default)]
+pub(crate) struct Initialize {
+    pub(crate) initial_owner: ethabi::Address,
+}
+
+impl abi::Function for Initialize {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "initialize";
+    fn encode(&self) -> Vec<Token> {
+        vec![Token::Address(self.initial_owner)]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.commitAttesterCommittee function.
+#[derive(Debug, Default)]
+pub(crate) struct CommitAttesterCommittee;
+
+impl abi::Function for CommitAttesterCommittee {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "commitAttesterCommittee";
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+    type Outputs = ();
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<()> {
+        let [] = tokens.try_into().ok().context("bad size")?;
+        Ok(())
+    }
+}
+
+/// ConsensusRegistry.owner function.
+#[derive(Debug, Default)]
+pub(crate) struct Owner;
+
+impl abi::Function for Owner {
+    type Contract = ConsensusRegistry;
+    const NAME: &'static str = "owner";
+    fn encode(&self) -> Vec<Token> {
+        vec![]
+    }
+    type Outputs = ethabi::Address;
+    fn decode_outputs(tokens: Vec<Token>) -> anyhow::Result<Self::Outputs> {
+        let [owner] = tokens.try_into().ok().context("bad size")?;
+        owner.into_address().context("not an address")
+    }
+}
+
+// Auxiliary structs.
+
+/// Raw representation of a secp256k1 public key.
+#[derive(Debug, Default)]
+pub(crate) struct Secp256k1PublicKey {
+    pub(crate) tag: [u8; 1],
+    pub(crate) x: [u8; 32],
+}
+
+impl Secp256k1PublicKey {
+    fn from_token(token: Token) -> anyhow::Result<Self> {
+        let [tag, x] = abi::into_tuple(token)?;
+        Ok(Self {
+            tag: abi::into_fixed_bytes(tag).context("tag")?,
+            x: abi::into_fixed_bytes(x).context("x")?,
+        })
+    }
+
+    fn to_token(&self) -> Token {
+        Token::Tuple(vec![
+            Token::FixedBytes(self.tag.into()),
+            Token::FixedBytes(self.x.into()),
+        ])
+    }
+}
+
+/// Raw representation of an attester committee member.
+#[derive(Debug)]
+pub(crate) struct Attester {
+    pub(crate) weight: u32,
+    pub(crate) pub_key: Secp256k1PublicKey,
+}
+
+impl Attester {
+    fn from_token(token: Token) -> anyhow::Result<Self> {
+        let [weight, pub_key] = abi::into_tuple(token)?;
+        Ok(Self {
+            weight: abi::into_uint(weight).context("weight")?,
+            pub_key: Secp256k1PublicKey::from_token(pub_key).context("pub_key")?,
+        })
+    }
+}
+
+/// Raw representation of a BLS12_381 public key.
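A note on the sizes used by these raw key types, including the BLS12_381 structs that follow: the fixed-size fields mirror how the contract's tuple encoding carves up the canonical key encodings. A compressed secp256k1 key is 33 bytes (1-byte tag plus 32-byte x coordinate), a BLS12-381 public key is 96 bytes split into three 32-byte words, and a BLS12-381 signature is 48 bytes split 32 + 16, matching the `encode_*` helpers added in `registry/testonly.rs` below. A small sketch of those invariants (the helper names here are mine, not the patch's):

```rust
// 33-byte compressed secp256k1 key -> (tag, x).
fn split_secp256k1(b: [u8; 33]) -> ([u8; 1], [u8; 32]) {
    (b[0..1].try_into().unwrap(), b[1..33].try_into().unwrap())
}

// 96-byte BLS12-381 public key -> three 32-byte words.
fn split_bls_pubkey(b: [u8; 96]) -> ([u8; 32], [u8; 32], [u8; 32]) {
    (
        b[0..32].try_into().unwrap(),
        b[32..64].try_into().unwrap(),
        b[64..96].try_into().unwrap(),
    )
}

// 48-byte BLS12-381 signature -> 32 + 16 bytes.
fn split_bls_signature(b: [u8; 48]) -> ([u8; 32], [u8; 16]) {
    (b[0..32].try_into().unwrap(), b[32..48].try_into().unwrap())
}

fn main() {
    let (tag, x) = split_secp256k1([2u8; 33]);
    assert_eq!((tag.len(), x.len()), (1, 32));
    let (a, b, c) = split_bls_pubkey([0u8; 96]);
    assert_eq!(a.len() + b.len() + c.len(), 96);
    let (sa, sb) = split_bls_signature([0u8; 48]);
    assert_eq!(sa.len() + sb.len(), 48);
}
```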
+#[derive(Debug, Default)] +pub(crate) struct BLS12_381PublicKey { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 32], + pub(crate) c: [u8; 32], +} + +impl BLS12_381PublicKey { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + Token::FixedBytes(self.c.into()), + ]) + } +} + +#[derive(Debug, Default)] +pub(crate) struct BLS12_381Signature { + pub(crate) a: [u8; 32], + pub(crate) b: [u8; 16], +} + +impl BLS12_381Signature { + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::FixedBytes(self.a.into()), + Token::FixedBytes(self.b.into()), + ]) + } +} diff --git a/core/node/consensus/src/registry/mod.rs b/core/node/consensus/src/registry/mod.rs new file mode 100644 index 000000000000..74da41309573 --- /dev/null +++ b/core/node/consensus/src/registry/mod.rs @@ -0,0 +1,80 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{storage::ConnectionPool, vm::VM}; + +mod abi; +#[cfg(test)] +pub(crate) mod testonly; +#[cfg(test)] +mod tests; + +fn decode_attester_key(k: &abi::Secp256k1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester(a: &abi::Attester) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +pub type Address = crate::abi::Address; + +#[derive(Debug)] +pub(crate) struct Registry { + contract: abi::ConsensusRegistry, + genesis: validator::Genesis, + vm: VM, +} + +impl Registry { + pub async fn new(genesis: validator::Genesis, pool: ConnectionPool) -> Self { + Self { + contract: abi::ConsensusRegistry::load(), + genesis, + vm: VM::new(pool).await, + } + } + + /// Attester committee for the given batch. + /// It reads committee from the contract. + /// Falls back to committee specified in the genesis. + pub async fn attester_committee_for( + &self, + ctx: &ctx::Ctx, + address: Option
, + attested_batch: attester::BatchNumber, + ) -> ctx::Result> { + let Some(batch_defining_committee) = attested_batch.prev() else { + // Batch 0 doesn't need attestation. + return Ok(None); + }; + let Some(address) = address else { + return Ok(self.genesis.attesters.clone()); + }; + let raw = self + .vm + .call( + ctx, + batch_defining_committee, + address, + self.contract.call(abi::GetAttesterCommittee), + ) + .await + .wrap("vm.call()")?; + let mut attesters = vec![]; + for a in raw { + attesters.push(decode_weighted_attester(&a).context("decode_weighted_attester()")?); + } + Ok(Some( + attester::Committee::new(attesters.into_iter()).context("Committee::new()")?, + )) + } +} diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs new file mode 100644 index 000000000000..a0c55a557feb --- /dev/null +++ b/core/node/consensus/src/registry/testonly.rs @@ -0,0 +1,118 @@ +use rand::Rng; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; +use zksync_test_account::Account; +use zksync_types::{ethabi, Execute, Transaction, U256}; + +use super::*; + +pub(crate) fn make_tx( + account: &mut Account, + address: crate::abi::Address, + call: crate::abi::Call, +) -> Transaction { + account.get_l2_tx_for_execute( + Execute { + contract_address: *address, + calldata: call.calldata().unwrap(), + value: U256::zero(), + factory_deps: vec![], + }, + None, + ) +} + +pub(crate) struct WeightedValidator { + weight: validator::Weight, + key: validator::PublicKey, + pop: validator::ProofOfPossession, +} + +fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256k1PublicKey { + let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap(); + abi::Secp256k1PublicKey { + tag: b[0..1].try_into().unwrap(), + x: b[1..33].try_into().unwrap(), + } +} + +fn encode_validator_key(k: &validator::PublicKey) -> abi::BLS12_381PublicKey { + let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap(); + abi::BLS12_381PublicKey { + a: b[0..32].try_into().unwrap(), + b: b[32..64].try_into().unwrap(), + c: b[64..96].try_into().unwrap(), + } +} + +fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::BLS12_381Signature { + let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap(); + abi::BLS12_381Signature { + a: b[0..32].try_into().unwrap(), + b: b[32..48].try_into().unwrap(), + } +} + +pub(crate) fn gen_validator(rng: &mut impl Rng) -> WeightedValidator { + let k: validator::SecretKey = rng.gen(); + WeightedValidator { + key: k.public(), + weight: rng.gen_range(1..100), + pop: k.sign_pop(), + } +} + +pub(crate) fn gen_attester(rng: &mut impl Rng) -> attester::WeightedAttester { + attester::WeightedAttester { + key: rng.gen(), + weight: rng.gen_range(1..100), + } +} + +impl Registry { + pub(crate) fn deploy(&self, account: &mut Account) -> (Address, Transaction) { + let tx = account.get_deploy_tx( + &abi::ConsensusRegistry::bytecode(), + None, + zksync_test_account::TxType::L2, + ); + (Address::new(tx.address), tx.tx) + } + + pub(crate) fn add( + &self, + node_owner: ethabi::Address, + validator: WeightedValidator, + attester: attester::WeightedAttester, + ) -> anyhow::Result> { + Ok(self.contract.call(abi::Add { + node_owner, + validator_pub_key: encode_validator_key(&validator.key), + validator_weight: validator + .weight + .try_into() + .context("overflow") + .context("validator_weight")?, + validator_pop: encode_validator_pop(&validator.pop), + attester_pub_key: encode_attester_key(&attester.key), + attester_weight: attester + 
.weight + .try_into() + .context("overflow") + .context("attester_weight")?, + })) + } + + pub(crate) fn initialize( + &self, + initial_owner: ethabi::Address, + ) -> crate::abi::Call { + self.contract.call(abi::Initialize { initial_owner }) + } + + pub(crate) fn commit_attester_committee( + &self, + ) -> crate::abi::Call { + self.contract.call(abi::CommitAttesterCommittee) + } +} diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs new file mode 100644 index 000000000000..935cd6738918 --- /dev/null +++ b/core/node/consensus/src/registry/tests.rs @@ -0,0 +1,91 @@ +use rand::Rng as _; +use zksync_concurrency::{ctx, scope}; +use zksync_consensus_roles::{attester, validator::testonly::Setup}; +use zksync_test_account::Account; +use zksync_types::ProtocolVersionId; + +use super::*; +use crate::storage::ConnectionPool; + +/// Test checking that parsing logic matches the abi specified in the json file. +#[test] +fn test_consensus_registry_abi() { + zksync_concurrency::testonly::abort_on_panic(); + let c = abi::ConsensusRegistry::load(); + c.call(abi::GetAttesterCommittee).test().unwrap(); + c.call(abi::Add::default()).test().unwrap(); + c.call(abi::Initialize::default()).test().unwrap(); + c.call(abi::CommitAttesterCommittee).test().unwrap(); + c.call(abi::Owner).test().unwrap(); +} + +#[tokio::test(flavor = "multi_thread")] +async fn test_attester_committee() { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::RealClock); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 10); + let account = &mut Account::random(); + let to_fund = &[account.address]; + + scope::run!(ctx, |ctx, s| async { + let pool = ConnectionPool::test(false, ProtocolVersionId::latest()).await; + let registry = Registry::new(setup.genesis.clone(), pool.clone()).await; + + // If the registry contract address is not specified, + // then the committee from genesis should be returned. + let got = registry + .attester_committee_for(ctx, None, attester::BatchNumber(10)) + .await + .unwrap(); + assert_eq!(setup.genesis.attesters, got); + + let (mut node, runner) = crate::testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run_real(ctx, to_fund)); + + // Deploy registry contract and initialize it. + let committee = + attester::Committee::new((0..5).map(|_| testonly::gen_attester(rng))).unwrap(); + let (registry_addr, tx) = registry.deploy(account); + let mut txs = vec![tx]; + let account_addr = account.address(); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account_addr), + )); + // Add attesters. + for a in committee.iter() { + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add(rng.gen(), testonly::gen_validator(rng), a.clone()) + .unwrap(), + )); + } + // Commit the update. + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + + node.push_block(&txs).await; + node.seal_batch().await; + pool.wait_for_batch(ctx, node.last_batch()).await?; + + // Read the attester committee using the vm. 
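The read that follows probes `batch + 1` rather than `batch`, and that is the point: `attester_committee_for()` looks the committee up in the state of `attested_batch.prev()`, so a committee committed in batch `b` only becomes effective when attesting `b + 1`, and batch 0, having no predecessor, is never attested. A toy model of just that arithmetic (the helper name is mine, with `u64` standing in for `attester::BatchNumber`):

```rust
// Batch whose state defines the committee for `attested`; `None` for batch 0,
// which therefore needs no attestation.
fn batch_defining_committee(attested: u64) -> Option<u64> {
    attested.checked_sub(1)
}

fn main() {
    let committed_in = 5; // batch in which `commitAttesterCommittee` landed
    assert_eq!(batch_defining_committee(0), None);
    // Attesting the commit batch itself still reads the previous state...
    assert_eq!(batch_defining_committee(committed_in), Some(4));
    // ...so the new committee first applies one batch later.
    assert_eq!(batch_defining_committee(committed_in + 1), Some(5));
}
```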
+ let batch = attester::BatchNumber(node.last_batch().0.into()); + assert_eq!( + Some(committee), + registry + .attester_committee_for(ctx, Some(registry_addr), batch + 1) + .await + .unwrap() + ); + Ok(()) + }) + .await + .unwrap(); +} diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 6ff2fb1ce0a0..512b37e81a11 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -1,13 +1,14 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_crypto::keccak256::Keccak256; -use zksync_consensus_roles::{attester, validator}; +use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; use zksync_consensus_storage::{self as storage, BatchStoreState}; use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo}; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber}; use super::{InsertCertificateError, PayloadQueue}; use crate::config; @@ -18,7 +19,7 @@ pub(crate) struct ConnectionPool(pub(crate) zksync_dal::ConnectionPool); impl ConnectionPool { /// Wrapper for `connection_tagged()`. - pub(crate) async fn connection<'a>(&'a self, ctx: &ctx::Ctx) -> ctx::Result> { + pub(crate) async fn connection(&self, ctx: &ctx::Ctx) -> ctx::Result> { Ok(Connection( ctx.wait(self.0.connection_tagged("consensus")) .await? @@ -164,6 +165,22 @@ impl<'a> Connection<'a> { .map_err(E::Other)?) } + /// Wrapper for `consensus_dal().upsert_attester_committee()`. + pub async fn upsert_attester_committee( + &mut self, + ctx: &ctx::Ctx, + number: BatchNumber, + committee: &attester::Committee, + ) -> ctx::Result<()> { + ctx.wait( + self.0 + .consensus_dal() + .upsert_attester_committee(number, committee), + ) + .await??; + Ok(()) + } + /// Wrapper for `consensus_dal().replica_state()`. pub async fn replica_state(&mut self, ctx: &ctx::Ctx) -> ctx::Result { Ok(ctx @@ -229,22 +246,22 @@ impl<'a> Connection<'a> { }) } - /// Wrapper for `consensus_dal().genesis()`. - pub async fn genesis(&mut self, ctx: &ctx::Ctx) -> ctx::Result> { - Ok(ctx - .wait(self.0.consensus_dal().genesis()) - .await? - .map_err(DalError::generalize)?) + /// Wrapper for `consensus_dal().global_config()`. + pub async fn global_config( + &mut self, + ctx: &ctx::Ctx, + ) -> ctx::Result> { + Ok(ctx.wait(self.0.consensus_dal().global_config()).await??) } - /// Wrapper for `consensus_dal().try_update_genesis()`. - pub async fn try_update_genesis( + /// Wrapper for `consensus_dal().try_update_global_config()`. + pub async fn try_update_global_config( &mut self, ctx: &ctx::Ctx, - genesis: &validator::Genesis, + cfg: &consensus_dal::GlobalConfig, ) -> ctx::Result<()> { Ok(ctx - .wait(self.0.consensus_dal().try_update_genesis(genesis)) + .wait(self.0.consensus_dal().try_update_global_config(cfg)) .await??) } @@ -267,7 +284,7 @@ impl<'a> Connection<'a> { /// (Re)initializes consensus genesis to start at the last L2 block in storage. /// Noop if `spec` matches the current genesis. 
-    pub(crate) async fn adjust_genesis(
+    pub(crate) async fn adjust_global_config(
         &mut self,
         ctx: &ctx::Ctx,
         spec: &config::GenesisSpec,
@@ -277,31 +294,34 @@
             .await
             .wrap("start_transaction()")?;
 
-        let old = txn.genesis(ctx).await.wrap("genesis()")?;
+        let old = txn.global_config(ctx).await.wrap("global_config()")?;
         if let Some(old) = &old {
-            if &config::GenesisSpec::from_genesis(old) == spec {
+            if &config::GenesisSpec::from_global_config(old) == spec {
                 // Hard fork is not needed.
                 return Ok(());
             }
         }
         tracing::info!("Performing a hard fork of consensus.");
-        let genesis = validator::GenesisRaw {
-            chain_id: spec.chain_id,
-            fork_number: old
-                .as_ref()
-                .map_or(validator::ForkNumber(0), |old| old.fork_number.next()),
-            first_block: txn.next_block(ctx).await.context("next_block()")?,
-            protocol_version: spec.protocol_version,
-            validators: spec.validators.clone(),
-            attesters: spec.attesters.clone(),
-            leader_selection: spec.leader_selection.clone(),
-        }
-        .with_hash();
+        let new = consensus_dal::GlobalConfig {
+            genesis: validator::GenesisRaw {
+                chain_id: spec.chain_id,
+                fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| {
+                    old.genesis.fork_number.next()
+                }),
+                first_block: txn.next_block(ctx).await.context("next_block()")?,
+                protocol_version: spec.protocol_version,
+                validators: spec.validators.clone(),
+                attesters: spec.attesters.clone(),
+                leader_selection: spec.leader_selection.clone(),
+            }
+            .with_hash(),
+            registry_address: spec.registry_address,
+        };
 
-        txn.try_update_genesis(ctx, &genesis)
+        txn.try_update_global_config(ctx, &new)
             .await
-            .wrap("try_update_genesis()")?;
+            .wrap("try_update_global_config()")?;
         txn.commit(ctx).await.wrap("commit()")?;
         Ok(())
     }
@@ -447,4 +467,29 @@ impl<'a> Connection<'a> {
             .await?
             .context("attestation_status()")?)
     }
+
+    /// Constructs `BlockArgs` for the last block of the batch.
+    pub async fn vm_block_args(
+        &mut self,
+        ctx: &ctx::Ctx,
+        batch: attester::BatchNumber,
+    ) -> ctx::Result<BlockArgs> {
+        let (_, block) = self
+            .get_l2_block_range_of_l1_batch(ctx, batch)
+            .await
+            .wrap("get_l2_block_range_of_l1_batch()")?
+            .context("batch not sealed")?;
+        let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into()));
+        let start_info = ctx
+            .wait(BlockStartInfo::new(
+                &mut self.0,
+                /*max_cache_age=*/ std::time::Duration::from_secs(10),
+            ))
+            .await?
+            .context("BlockStartInfo::new()")?;
+        Ok(ctx
+            .wait(BlockArgs::new(&mut self.0, block, &start_info))
+            .await?
+            .context("BlockArgs::new")?)
+    }
 }
diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs
index 6a96812ae408..cb8e039d7d01 100644
--- a/core/node/consensus/src/storage/store.rs
+++ b/core/node/consensus/src/storage/store.rs
@@ -325,9 +325,10 @@ impl storage::PersistentBlockStore for Store {
         Ok(self
             .conn(ctx)
             .await?
-            .genesis(ctx)
+            .global_config(ctx)
             .await?
-            .context("not found")?)
+            .context("not found")?
+            .genesis)
     }
 
     fn persisted(&self) -> sync::watch::Receiver<BlockStoreState> {
diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs
index 5d1279afbbfd..65c464d98b93 100644
--- a/core/node/consensus/src/storage/testonly.rs
+++ b/core/node/consensus/src/storage/testonly.rs
@@ -1,5 +1,4 @@
 //! Storage test helpers.
- use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, time}; use zksync_consensus_roles::{attester, validator}; @@ -13,6 +12,7 @@ use zksync_types::{ }; use super::{Connection, ConnectionPool}; +use crate::registry; impl Connection<'_> { /// Wrapper for `consensus_dal().batch_of_block()`. @@ -181,16 +181,16 @@ impl ConnectionPool { want_last: validator::BlockNumber, ) -> ctx::Result> { let blocks = self.wait_for_block_certificates(ctx, want_last).await?; - let genesis = self + let cfg = self .connection(ctx) .await .wrap("connection()")? - .genesis(ctx) + .global_config(ctx) .await .wrap("genesis()")? .context("genesis is missing")?; for block in &blocks { - block.verify(&genesis).context(block.number())?; + block.verify(&cfg.genesis).context(block.number())?; } Ok(blocks) } @@ -199,6 +199,7 @@ impl ConnectionPool { &self, ctx: &ctx::Ctx, want_last: attester::BatchNumber, + registry_addr: Option, ) -> ctx::Result<()> { // Wait for the last batch to be attested. const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100); @@ -214,17 +215,17 @@ impl ConnectionPool { ctx.sleep(POLL_INTERVAL).await?; } let mut conn = self.connection(ctx).await.wrap("connection()")?; - let genesis = conn - .genesis(ctx) + let cfg = conn + .global_config(ctx) .await - .wrap("genesis()")? - .context("genesis is missing")?; + .wrap("global_config()")? + .context("global config is missing")?; let first = conn - .batch_of_block(ctx, genesis.first_block) + .batch_of_block(ctx, cfg.genesis.first_block) .await .wrap("batch_of_block()")? .context("batch of first_block is missing")?; - let committee = genesis.attesters.as_ref().unwrap(); + let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await; for i in first.0..want_last.0 { let i = attester::BatchNumber(i); let hash = conn @@ -240,8 +241,13 @@ impl ConnectionPool { if cert.message.hash != hash { return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into()); } - cert.verify(genesis.hash(), committee) - .context("cert[{i:?}].verify()")?; + let committee = registry + .attester_committee_for(ctx, registry_addr, i) + .await + .context("attester_committee_for()")? + .context("committee not specified")?; + cert.verify(cfg.genesis.hash(), &committee) + .with_context(|| format!("cert[{i:?}].verify()"))?; } Ok(()) } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 90063772da92..241998f26928 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -42,8 +42,9 @@ use zksync_state_keeper::{ }; use zksync_test_account::Account; use zksync_types::{ + ethabi, fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput}, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, + L1BatchNumber, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, Transaction, }; use zksync_web3_decl::client::{Client, DynClient, L2}; @@ -54,6 +55,7 @@ use crate::{ }; /// Fake StateKeeper for tests. +#[derive(Debug)] pub(super) struct StateKeeper { protocol_version: ProtocolVersionId, // Batch of the `last_block`. @@ -62,8 +64,6 @@ pub(super) struct StateKeeper { // timestamp of the last block. 
last_timestamp: u64, batch_sealed: bool, - // test L2 account - account: Account, next_priority_op: PriorityOpId, actions_sender: ActionQueueSender, @@ -116,6 +116,7 @@ pub(super) fn new_configs( }) .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), + registry_address: None, }; network::testonly::new_configs(rng, setup, gossip_peers) .into_iter() @@ -183,7 +184,6 @@ pub(super) struct StateKeeperRunner { addr: sync::watch::Sender>, rocksdb_dir: tempfile::TempDir, metadata_calculator: MetadataCalculator, - account: Account, } impl StateKeeper { @@ -242,7 +242,6 @@ impl StateKeeper { .await .context("MetadataCalculator::new()")?; let tree_reader = metadata_calculator.tree_reader(); - let account = Account::random(); Ok(( Self { protocol_version, @@ -256,7 +255,6 @@ impl StateKeeper { addr: addr.subscribe(), pool: pool.clone(), tree_reader, - account: account.clone(), }, StateKeeperRunner { actions_queue, @@ -265,7 +263,6 @@ impl StateKeeper { addr, rocksdb_dir, metadata_calculator, - account, }, )) } @@ -306,22 +303,29 @@ impl StateKeeper { } } - /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. - pub async fn push_random_block(&mut self, rng: &mut impl Rng) { + pub async fn push_block(&mut self, txs: &[Transaction]) { let mut actions = vec![self.open_block()]; - for _ in 0..rng.gen_range(3..8) { - let tx = match rng.gen() { - true => l2_transaction(&mut self.account, 1_000_000), + actions.extend( + txs.iter() + .map(|tx| FetchedTransaction::new(tx.clone()).into()), + ); + actions.push(SyncAction::SealL2Block); + self.actions_sender.push_actions(actions).await.unwrap(); + } + + /// Pushes a new L2 block with `transactions` transactions to the `StateKeeper`. + pub async fn push_random_block(&mut self, rng: &mut impl Rng, account: &mut Account) { + let txs: Vec<_> = (0..rng.gen_range(3..8)) + .map(|_| match rng.gen() { + true => l2_transaction(account, 1_000_000), false => { - let tx = l1_transaction(&mut self.account, self.next_priority_op); + let tx = l1_transaction(account, self.next_priority_op); self.next_priority_op += 1; tx } - }; - actions.push(FetchedTransaction::new(tx).into()); - } - actions.push(SyncAction::SealL2Block); - self.actions_sender.push_actions(actions).await.unwrap(); + }) + .collect(); + self.push_block(&txs).await; } /// Pushes `SealBatch` command to the `StateKeeper`. @@ -334,14 +338,19 @@ impl StateKeeper { } /// Pushes `count` random L2 blocks to the StateKeeper. - pub async fn push_random_blocks(&mut self, rng: &mut impl Rng, count: usize) { + pub async fn push_random_blocks( + &mut self, + rng: &mut impl Rng, + account: &mut Account, + count: usize, + ) { for _ in 0..count { // 20% chance to seal an L1 batch. // `seal_batch()` also produces a (fictive) block. if rng.gen_range(0..100) < 20 { self.seal_batch().await; } else { - self.push_random_block(rng).await; + self.push_random_block(rng, account).await; } } } @@ -451,7 +460,13 @@ impl StateKeeper { client, sync_state: self.sync_state.clone(), } - .run(ctx, self.actions_sender, cfgs.config, cfgs.secrets) + .run( + ctx, + self.actions_sender, + cfgs.config, + cfgs.secrets, + cfgs.net.build_version, + ) .await } } @@ -534,14 +549,21 @@ async fn mock_metadata_calculator_step(ctx: &ctx::Ctx, pool: &ConnectionPool) -> impl StateKeeperRunner { // Executes the state keeper task with real metadata calculator task // and fake commitment generator (because real one is too slow). 
- pub async fn run_real(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { + pub async fn run_real( + self, + ctx: &ctx::Ctx, + addrs_to_fund: &[ethabi::Address], + ) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { - // Fund the test account. Required for L2 transactions to succeed. - fund(&self.pool.0, &[self.account.address]).await; + // Fund the test accounts. Required for L2 transactions to succeed. + fund(&self.pool.0, addrs_to_fund).await; let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let io = ExternalIO::new( self.pool.0.clone(), @@ -649,8 +671,11 @@ impl StateKeeperRunner { pub async fn run(self, ctx: &ctx::Ctx) -> anyhow::Result<()> { let res = scope::run!(ctx, |ctx, s| async { let (stop_send, stop_recv) = sync::watch::channel(false); - let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(self.pool.0.clone(), Address::repeat_byte(11), 5); + let (persistence, l2_block_sealer) = StateKeeperPersistence::new( + self.pool.0.clone(), + ethabi::Address::repeat_byte(11), + 5, + ); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index b245d0524aa9..abd35508c7f7 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,17 +1,24 @@ use anyhow::Context as _; -use test_casing::{test_casing, Product}; +use rand::Rng as _; +use test_casing::test_casing; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ attester, validator::testonly::{Setup, SetupSpec}, }; -use zksync_dal::consensus_dal::AttestationStatus; -use zksync_node_sync::MainNodeClient; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{mn::run_main_node, storage::ConnectionPool, testonly}; +use super::VERSIONS; +use crate::{ + mn::run_main_node, + registry::{testonly, Registry}, + storage::ConnectionPool, + testonly::{new_configs, StateKeeper}, +}; #[test_casing(2, VERSIONS)] #[tokio::test] @@ -19,24 +26,31 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::test(false, version).await; - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let (mut sk, runner) = StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. 
while sk.last_sealed_batch() < L1BatchNumber(3) { - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); let setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; pool.wait_for_batch(ctx, first_batch).await?; @@ -44,11 +58,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Connect to API endpoint. let api = sk.connect(ctx).await?; let fetch_status = || async { - let s = api - .fetch_attestation_status() - .await? + let s = ctx + .wait(api.attestation_status()) + .await?? .context("no attestation_status")?; - let s: AttestationStatus = + let s: consensus_dal::AttestationStatus = zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); Ok(s) @@ -62,24 +76,37 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { attester::BatchNumber(first_batch.0.into()) ); - // Insert a (fake) cert, then check again. + tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; let hash = conn.batch_hash(ctx, number).await?.unwrap(); - let genesis = conn.genesis(ctx).await?.unwrap().hash(); + let gcfg = conn.global_config(ctx).await?.unwrap(); + let m = attester::Batch { + number, + hash, + genesis: gcfg.genesis.hash(), + }; + let mut sigs = attester::MultiSig::default(); + for k in &setup.attester_keys { + sigs.add(k.public(), k.sign_msg(m.clone()).sig); + } let cert = attester::BatchQC { - signatures: attester::MultiSig::default(), - message: attester::Batch { - number, - hash, - genesis, - }, + signatures: sigs, + message: m, }; + conn.upsert_attester_committee( + ctx, + cert.message.number, + setup.genesis.attesters.as_ref().unwrap(), + ) + .await + .context("upsert_attester_committee")?; conn.insert_batch_certificate(ctx, &cert) .await .context("insert_batch_certificate()")?; } + tracing::info!("Check again."); let want = status.next_batch_to_attest.next(); let got = fetch_status().await?; assert_eq!(want, got.next_batch_to_attest); @@ -93,34 +120,65 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. -// -// TODO: it would be nice to use `StateKeeperRunner::run_real()` in this test, -// however as of now it doesn't work with ENs and it doesn't work with -// `ConnectionPool::from_snapshot`. 
-#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(2, VERSIONS)] #[tokio::test] -async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let cfgs = testonly::new_configs(rng, &setup, NODES); - + let mut cfgs = new_configs(rng, &setup, NODES); scope::run!(ctx, |ctx, s| async { - let validator_pool = ConnectionPool::test(from_snapshot, version).await; - let (mut validator, runner) = - testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + let validator_pool = ConnectionPool::test(false, version).await; + let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; s.spawn_bg(async { runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("validator")) .await .context("validator") }); - // API server needs at least 1 L1 batch to start. + + tracing::info!("deploy registry with 1 attester"); + let attesters: Vec<_> = setup.genesis.attesters.as_ref().unwrap().iter().collect(); + let registry = Registry::new(setup.genesis.clone(), validator_pool.clone()).await; + let (registry_addr, tx) = registry.deploy(account); + cfgs[0] + .config + .genesis_spec + .as_mut() + .unwrap() + .registry_address = Some(*registry_addr); + let mut txs = vec![tx]; + txs.push(testonly::make_tx( + account, + registry_addr, + registry.initialize(account.address), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[0].clone(), + ) + .unwrap(), + )); + txs.push(testonly::make_tx( + account, + registry_addr, + registry.commit_attester_committee(), + )); + validator.push_block(&txs).await; validator.seal_batch().await; + + tracing::info!("wait for the batch to be processed before starting consensus"); validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -137,13 +195,13 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId let mut node_pools = vec![]; for (i, cfg) in cfgs[1..].iter().enumerate() { let i = ctx::NoCopy(i); - let pool = ConnectionPool::test(from_snapshot, version).await; - let (node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + let pool = ConnectionPool::test(false, version).await; + let (node, runner) = StateKeeper::new(ctx, pool.clone()).await?; node_pools.push(pool.clone()); s.spawn_bg(async { let i = i; runner - .run(ctx) + .run_real(ctx, to_fund) .instrument(tracing::info_span!("node", i = *i)) .await .with_context(|| format!("node{}", *i)) @@ -151,13 +209,31 @@ async fn test_multiple_attesters(from_snapshot: bool, version: ProtocolVersionId s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, cfg.clone())); } - tracing::info!("Create some batches"); - validator.push_random_blocks(rng, 20).await; - validator.seal_batch().await; + tracing::info!("add attesters one by one"); + #[allow(clippy::needless_range_loop)] + for i in 1..attesters.len() { + let txs = vec![ + testonly::make_tx( + account, + registry_addr, + registry + .add( + rng.gen(), + testonly::gen_validator(rng), + attesters[i].clone(), + ) + .unwrap(), + ), + testonly::make_tx(account, registry_addr, registry.commit_attester_committee()), + ]; + 
validator.push_block(&txs).await; + validator.seal_batch().await; + } + tracing::info!("Wait for the batches to be attested"); let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); validator_pool - .wait_for_batch_certificates_and_verify(ctx, want_last) + .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; Ok(()) }) diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs index 41d73fdb87c6..f0cae7f2c02e 100644 --- a/core/node/consensus/src/tests/batch.rs +++ b/core/node/consensus/src/tests/batch.rs @@ -1,6 +1,7 @@ use test_casing::{test_casing, Product}; use zksync_concurrency::{ctx, scope}; use zksync_consensus_roles::validator; +use zksync_test_account::Account; use zksync_types::{L1BatchNumber, ProtocolVersionId}; use super::{FROM_SNAPSHOT, VERSIONS}; @@ -13,6 +14,7 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(from_snapshot, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks and L1 batches in a way that the // last L1 batch is guaranteed to have some L2 blocks executed in it. @@ -23,11 +25,11 @@ async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersion for _ in 0..3 { for _ in 0..2 { - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; } sk.seal_batch().await; } - sk.push_random_block(rng).await; + sk.push_random_block(rng, account).await; pool.wait_for_payload(ctx, sk.last_block()).await?; @@ -84,11 +86,13 @@ async fn test_batch_witness(version: ProtocolVersionId) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let to_fund = &[account.address]; scope::run!(ctx, |ctx, s| async { let pool = ConnectionPool::from_genesis(version).await; let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx)); + s.spawn_bg(runner.run_real(ctx, to_fund)); tracing::info!("analyzing storage"); { @@ -101,7 +105,7 @@ async fn test_batch_witness(version: ProtocolVersionId) { } // Seal a bunch of batches. - node.push_random_blocks(rng, 10).await; + node.push_random_blocks(rng, account, 10).await; node.seal_batch().await; pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; // We can verify only 2nd batch onward, because diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 0b611d55f06a..91f01f865a2b 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -7,6 +7,8 @@ use zksync_consensus_roles::{ validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; +use zksync_dal::consensus_dal; +use zksync_test_account::Account; use zksync_types::ProtocolVersionId; use crate::{ @@ -28,6 +30,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let pool = ConnectionPool::test(false, version).await; + let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. // Fetch a suffix of blocks that we will generate (fake) certs for. @@ -35,15 +38,21 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Start state keeper. 
let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; s.spawn_bg(runner.run(ctx)); - sk.push_random_blocks(rng, 10).await; + sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); setup.first_block = validator::BlockNumber(4); let mut setup = Setup::from(setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; - conn.try_update_genesis(ctx, &setup.genesis) - .await - .wrap("try_update_genesis()")?; + conn.try_update_global_config( + ctx, + &consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + }, + ) + .await + .wrap("try_update_global_config()")?; for i in setup.genesis.first_block.0..sk.last_block().next().0 { let i = validator::BlockNumber(i); let payload = conn @@ -95,6 +104,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Start state keeper."); @@ -103,7 +113,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx)); tracing::info!("Populate storage with a bunch of blocks."); - sk.push_random_blocks(rng, 5).await; + sk.push_random_blocks(rng, account, 5).await; pool .wait_for_payload(ctx, sk.last_block()) .await @@ -118,7 +128,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { s.spawn_bg(run_main_node(ctx, cfg.config.clone(), cfg.secrets.clone(), pool.clone())); tracing::info!("Generate couple more blocks and wait for consensus to catch up."); - sk.push_random_blocks(rng, 3).await; + sk.push_random_blocks(rng, account, 3).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -126,7 +136,7 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Synchronously produce blocks one by one, and wait for consensus."); for _ in 0..2 { - sk.push_random_blocks(rng, 1).await; + sk.push_random_blocks(rng, account, 1).await; pool .wait_for_block_certificate(ctx, sk.last_block()) .await @@ -158,6 +168,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("spawn validator"); @@ -173,7 +184,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { )); tracing::info!("produce some batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -191,7 +202,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more batches"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) @@ -209,7 +220,7 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { }); tracing::info!("produce more blocks and compare storages"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want = validator_pool 
.wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -243,6 +254,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let account = &mut Account::random(); // topology: // validator <-> node <-> node <-> ... @@ -264,7 +276,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { .context("validator") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. validator.seal_batch().await; validator_pool @@ -299,7 +311,7 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Make validator produce blocks and wait for fetchers to get them."); // Note that block from before and after genesis have to be fetched. - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -328,6 +340,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); let cfgs = testonly::new_configs(rng, &setup, 1); + let account = &mut Account::random(); // Run all nodes in parallel. scope::run!(ctx, |ctx, s| async { @@ -342,7 +355,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { .context("main_node") }); tracing::info!("Generate a couple of blocks, before initializing consensus genesis."); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; // API server needs at least 1 L1 batch to start. 
main_node.seal_batch().await; main_node_pool @@ -381,7 +394,7 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } tracing::info!("Make the main node produce blocks and wait for consensus to finalize them"); - main_node.push_random_blocks(rng, 5).await; + main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool .wait_for_block_certificates_and_verify(ctx, want_last) @@ -409,6 +422,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn validator."); @@ -433,7 +447,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_block_certificate(ctx, validator.last_block()) .await?; @@ -447,7 +461,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_fetcher(ctx, client.clone())); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; node_pool .wait_for_payload(ctx, validator.last_block()) .await?; @@ -461,7 +475,7 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; s.spawn_bg(runner.run(ctx)); s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); - validator.push_random_blocks(rng, 3).await; + validator.push_random_blocks(rng, account, 3).await; let want = validator_pool .wait_for_block_certificates_and_verify(ctx, validator.last_block()) .await?; @@ -488,6 +502,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { let setup = Setup::new(rng, 1); let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; @@ -535,7 +550,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { }); tracing::info!("Sync some blocks"); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; let to_prune = validator.last_sealed_batch(); tracing::info!( @@ -546,7 +561,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { tracing::info!( "Seal another batch to make sure that there is at least 1 sealed batch after pruning." 
); - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool .wait_for_batch(ctx, validator.last_sealed_batch()) @@ -565,7 +580,7 @@ async fn test_with_pruning(version: ProtocolVersionId) { .prune_batches(ctx, to_prune) .await .context("prune_batches")?; - validator.push_random_blocks(rng, 5).await; + validator.push_random_blocks(rng, account, 5).await; node_pool .wait_for_block_certificates(ctx, validator.last_block()) .await @@ -582,6 +597,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); + let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { tracing::info!("Spawn a validator."); @@ -601,7 +617,7 @@ async fn test_centralized_fetcher(from_snapshot: bool, version: ProtocolVersionI s.spawn_bg(node.run_fetcher(ctx, validator.connect(ctx).await?)); tracing::info!("Produce some blocks and wait for node to fetch them"); - validator.push_random_blocks(rng, 10).await; + validator.push_random_blocks(rng, account, 10).await; let want = validator_pool .wait_for_payload(ctx, validator.last_block()) .await?; diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs new file mode 100644 index 000000000000..f7f14ad8fe0a --- /dev/null +++ b/core/node/consensus/src/vm.rs @@ -0,0 +1,96 @@ +use anyhow::Context as _; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_consensus_roles::attester; +use zksync_multivm::interface::TxExecutionMode; +use zksync_node_api_server::{ + execution_sandbox::{TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyLimiter}, + tx_sender::MultiVMBaseSystemContracts, +}; +use zksync_state::PostgresStorageCaches; +use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; +use zksync_types::{ + ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, +}; +use zksync_vm_interface::ExecutionResult; + +use crate::{abi, storage::ConnectionPool}; + +/// VM executes eth_calls on the db. +#[derive(Debug)] +pub(crate) struct VM { + pool: ConnectionPool, + setup_args: TxSetupArgs, + limiter: VmConcurrencyLimiter, +} + +impl VM { + /// Constructs a new `VM` instance. 
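Before the constructor, it is worth spelling out how `call()` further down shapes its transaction: an `eth_call` is a free read, so the `L2Tx` is built with nonce 0, zero fee components, a zero sender and value, and a deliberately huge gas limit; only the target address and calldata carry information. A rough model of that shape with local stand-in types (the real code uses `zksync_types::l2::L2Tx` and `zksync_types::fee::Fee`):

```rust
#[derive(Debug)]
struct Fee {
    gas_limit: u64,
    max_fee_per_gas: u64,
    max_priority_fee_per_gas: u64,
}

#[derive(Debug)]
struct CallTx {
    to: [u8; 20],
    calldata: Vec<u8>,
    nonce: u32,
    value: u64,
    fee: Fee,
}

// Builds the zero-cost transaction shape used for read-only eth_calls.
fn eth_call_tx(to: [u8; 20], calldata: Vec<u8>) -> CallTx {
    CallTx {
        to,
        calldata,
        nonce: 0, // no account state is consumed by a read
        value: 0,
        fee: Fee {
            gas_limit: 2_000_000_000, // generous; nothing is actually charged
            max_fee_per_gas: 0,
            max_priority_fee_per_gas: 0,
        },
    }
}

fn main() {
    let tx = eth_call_tx([0u8; 20], vec![0xde, 0xad]);
    assert_eq!(tx.to, [0u8; 20]);
    assert_eq!(tx.calldata.len(), 2);
    assert_eq!((tx.nonce, tx.value), (0, 0));
    assert_eq!(tx.fee.max_fee_per_gas + tx.fee.max_priority_fee_per_gas, 0);
    assert!(tx.fee.gas_limit > 0);
}
```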
+ pub async fn new(pool: ConnectionPool) -> Self { + Self { + pool, + setup_args: TxSetupArgs { + execution_mode: TxExecutionMode::EthCall, + operator_account: AccountTreeId::default(), + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + base_system_contracts: scope::wait_blocking( + MultiVMBaseSystemContracts::load_eth_call_blocking, + ) + .await, + caches: PostgresStorageCaches::new(1, 1), + validation_computational_gas_limit: u32::MAX, + chain_id: L2ChainId::default(), + whitelisted_tokens_for_aa: vec![], + enforced_base_fee: None, + }, + limiter: VmConcurrencyLimiter::new(1).0, + } + } + + pub async fn call( + &self, + ctx: &ctx::Ctx, + batch: attester::BatchNumber, + address: abi::Address, + call: abi::Call, + ) -> ctx::Result { + let tx = L2Tx::new( + *address, + call.calldata().context("call.calldata()")?, + Nonce(0), + Fee { + gas_limit: U256::from(2000000000u32), + max_fee_per_gas: U256::zero(), + max_priority_fee_per_gas: U256::zero(), + gas_per_pubdata_limit: U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE), + }, + ethabi::Address::zero(), + U256::zero(), + vec![], + Default::default(), + ); + let permit = ctx.wait(self.limiter.acquire()).await?.unwrap(); + let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; + let args = conn + .vm_block_args(ctx, batch) + .await + .wrap("vm_block_args()")?; + let output = ctx + .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox( + permit, + self.setup_args.clone(), + TxExecutionArgs::for_eth_call(tx.clone()), + conn.0, + args, + None, + vec![], + )) + .await? + .context("execute_tx_in_sandbox()")?; + match output.vm.result { + ExecutionResult::Success { output } => { + Ok(call.decode_outputs(&output).context("decode_output()")?) + } + other => Err(anyhow::format_err!("unsuccessful execution: {other:?}").into()), + } + } +} diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index fe4889225675..d5b19a1d4b01 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -64,6 +64,7 @@ futures.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["rt"] } ctrlc.workspace = true +semver.workspace = true [dev-dependencies] zksync_env_config.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs index 14365384c1a4..5acdab568e74 100644 --- a/core/node/node_framework/src/implementations/layers/consensus/external_node.rs +++ b/core/node/node_framework/src/implementations/layers/consensus/external_node.rs @@ -23,6 +23,7 @@ use crate::{ /// Wiring layer for external node consensus component. 
#[derive(Debug)] pub struct ExternalNodeConsensusLayer { + pub build_version: semver::Version, pub config: Option, pub secrets: Option, } @@ -78,6 +79,7 @@ impl WiringLayer for ExternalNodeConsensusLayer { }; let consensus_task = ExternalNodeTask { + build_version: self.build_version, config, pool, main_node_client, @@ -90,6 +92,7 @@ impl WiringLayer for ExternalNodeConsensusLayer { #[derive(Debug)] pub struct ExternalNodeTask { + build_version: semver::Version, config: Option<(ConsensusConfig, ConsensusSecrets)>, pool: ConnectionPool, main_node_client: Box>, @@ -118,6 +121,7 @@ impl Task for ExternalNodeTask { self.sync_state, self.main_node_client, self.action_queue_sender, + self.build_version, )); // `run_external_node` might return an error or panic, // in which case we need to return immediately, diff --git a/core/node/node_sync/src/client.rs b/core/node/node_sync/src/client.rs index d064803eab59..ee89db10ddd1 100644 --- a/core/node/node_sync/src/client.rs +++ b/core/node/node_sync/src/client.rs @@ -42,12 +42,7 @@ pub trait MainNodeClient: 'static + Send + Sync + fmt::Debug { with_transactions: bool, ) -> EnrichedClientResult>; - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult>; - async fn fetch_genesis_config(&self) -> EnrichedClientResult; - - async fn fetch_attestation_status(&self) - -> EnrichedClientResult>; } #[async_trait] @@ -133,20 +128,6 @@ impl MainNodeClient for Box> { .with_arg("with_transactions", &with_transactions) .await } - - async fn fetch_consensus_genesis(&self) -> EnrichedClientResult> { - self.consensus_genesis() - .rpc_context("consensus_genesis") - .await - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - self.attestation_status() - .rpc_context("attestation_status") - .await - } } /// Main node health check. 
diff --git a/core/node/node_sync/src/testonly.rs b/core/node/node_sync/src/testonly.rs index b9e1adc995af..16027a71a251 100644 --- a/core/node/node_sync/src/testonly.rs +++ b/core/node/node_sync/src/testonly.rs @@ -71,18 +71,6 @@ impl MainNodeClient for MockMainNodeClient { Ok(Some(block)) } - async fn fetch_consensus_genesis( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - - async fn fetch_attestation_status( - &self, - ) -> EnrichedClientResult> { - unimplemented!() - } - async fn fetch_genesis_config(&self) -> EnrichedClientResult { Ok(mock_genesis_config()) } diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 0ce8c06be0e7..23aec8af49fb 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -117,7 +117,7 @@ pub async fn fund(pool: &ConnectionPool, addresses: &[Address]) { pub(crate) const DEFAULT_GAS_PER_PUBDATA: u32 = 10000; -pub(crate) fn fee(gas_limit: u32) -> Fee { +pub fn fee(gas_limit: u32) -> Fee { Fee { gas_limit: U256::from(gas_limit), max_fee_per_gas: SYSTEM_CONTEXT_MINIMAL_BASE_FEE.into(), diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 24e8638876bf..c3cfada3a1a9 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7620,9 +7620,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -7656,9 +7656,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efb7ff3ec44b7b92fd4e28d9d92b83d61dc74125ccfc90bcfb27a5750d8a8580" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" dependencies = [ "anyhow", "blst", @@ -7680,9 +7680,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72223c0b20621775db51bcc4b043addafeaf784d444af2ad4bc8bcdee477367c" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" dependencies = [ "anyhow", "bit-vec", @@ -7702,9 +7702,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41d1750ad93f7e3a0c2f5880f9bcc1244a3b46d3e6c124c4f65f545032b87464" +checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" dependencies = [ "anyhow", "async-trait", @@ -7722,9 +7722,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand 0.8.5", @@ -8034,9 +8034,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = 
"d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -8055,9 +8055,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck 0.5.0", diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index cd5d6a0b280e..75859021979f 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6349,9 +6349,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0e31a9fc9a390b440cd12bbe040330dc64f64697a8a8ecbc3beb98cd0747909" +checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" dependencies = [ "anyhow", "once_cell", @@ -6383,9 +6383,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ff679f8b5f671d887a750b8107f3b5c01fd6085f68eef37ab01de8d2bd0736b" +checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", "rand", @@ -6434,9 +6434,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f6ba3bf0aac20de18b4ae18a22d8c81b83f8f72e8fdec1c879525ecdacd2f5" +checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" dependencies = [ "anyhow", "bit-vec", @@ -6455,9 +6455,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.11" +version = "0.1.0-rc.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7798c248b9a64505f0586bd5fadad6b26c999be4a8dec6b1a86b10b3888169c5" +checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 4a08776558ed..e1ad63136af1 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,7 +30,7 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_protobuf = "=0.1.0-rc.11" +zksync_protobuf = "=0.1.0-rc.12" # External dependencies anyhow = "1.0.82" From 4cff529ebcc5032594869ac165a6a4d6d779affd Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 10 Sep 2024 14:35:55 +0300 Subject: [PATCH 045/116] test: Improve revert integration test (#2822) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Removes a data race from the revert integration test (a single node one); a batch gets executed after it is verified that not all batches are executed. - Removes copy-paste between single-node and EN tests. - Structures revert tests into smaller steps. ## Why ❔ Makes the test less flaky and easier to maintain. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. 
- [x] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-core-reusable.yml | 16 +- .../tests/revert-and-restart-en.test.ts | 627 +++++------------- .../tests/revert-and-restart.test.ts | 353 +++------- core/tests/revert-test/tests/utils.ts | 394 +++++++++++ etc/utils/src/file-configs.ts | 15 +- etc/utils/src/logs.ts | 2 +- 6 files changed, 663 insertions(+), 744 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 53bd1ab7a518..18cbc2c2afa3 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -293,7 +293,7 @@ jobs: - name: Show revert.log logs if: always() - run: ci_run cat core/tests/revert-test/revert.log || true + run: ci_run cat logs/revert/default/server.log || true - name: Show upgrade.log logs if: always() @@ -382,7 +382,11 @@ jobs: - name: Run revert test run: | - ENABLE_CONSENSUS=${{ matrix.consensus }} DEPLOYMENT_MODE=${{ matrix.deployment_mode }} PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" ci_run zk test i revert-en + ENABLE_CONSENSUS=${{ matrix.consensus }} \ + DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ + PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" \ + ci_run zk test i revert-en + # test terminates the nodes, so we restart them. if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then ZKSYNC_ENV=docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & @@ -414,13 +418,13 @@ jobs: if: always() run: ci_run cat ext-node.log || true - - name: Show revert_main.log logs + - name: Show revert logs (main node) if: always() - run: ci_run cat core/tests/revert-test/revert_main.log || true + run: ci_run cat logs/revert/en/default/server.log || true - - name: Show revert_ext.log logs + - name: Show revert logs (EN) if: always() - run: ci_run cat core/tests/revert-test/revert_ext.log || true + run: ci_run cat logs/revert/en/default/external_node.log || true - name: Show upgrade.log logs if: always() diff --git a/core/tests/revert-test/tests/revert-and-restart-en.test.ts b/core/tests/revert-test/tests/revert-and-restart-en.test.ts index e1694418db14..42fa01a02c90 100644 --- a/core/tests/revert-test/tests/revert-and-restart-en.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart-en.test.ts @@ -4,79 +4,34 @@ // main_contract.getTotalBatchesCommitted actually checks the number of batches committed. // main_contract.getTotalBatchesExecuted actually checks the number of batches executed. 
import * as utils from 'utils'; -import { Tester } from './tester'; -import { exec, runServerInBackground, runExternalNodeInBackground } from './utils'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect, assert } from 'chai'; -import fs from 'fs'; +import { assert, expect } from 'chai'; +import fs from 'node:fs/promises'; import * as child_process from 'child_process'; import * as dotenv from 'dotenv'; -import { - getAllConfigsPath, - loadConfig, - shouldLoadConfigFromFile, - replaceAggregatedBlockExecuteDeadline -} from 'utils/build/file-configs'; +import { loadConfig, replaceAggregatedBlockExecuteDeadline, shouldLoadConfigFromFile } from 'utils/build/file-configs'; import path from 'path'; -import { ChildProcessWithoutNullStreams } from 'child_process'; import { logsTestPath } from 'utils/build/logs'; -import { killPidWithAllChilds } from 'utils/build/kill'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; const pathToHome = path.join(__dirname, '../../../..'); const fileConfig = shouldLoadConfigFromFile(); -let mainEnv: string; -let extEnv: string; - -let deploymentMode: string; - -if (fileConfig.loadFromFile) { - const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); - deploymentMode = genesisConfig.deploymentMode; -} else { - if (!process.env.DEPLOYMENT_MODE) { - throw new Error('DEPLOYMENT_MODE is not set'); - } - if (!['Validium', 'Rollup'].includes(process.env.DEPLOYMENT_MODE)) { - throw new Error(`Unknown deployment mode: ${process.env.DEPLOYMENT_MODE}`); - } - deploymentMode = process.env.DEPLOYMENT_MODE; -} - -if (deploymentMode == 'Validium') { - mainEnv = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; - extEnv = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; -} else { - // Rollup deployment mode - mainEnv = process.env.IN_DOCKER ? 'docker' : 'dev'; - extEnv = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; -} - async function logsPath(name: string): Promise { return await logsTestPath(fileConfig.chain, 'logs/revert/en', name); } -interface SuggestedValues { - lastExecutedL1BatchNumber: bigint; - nonce: number; - priorityFee: number; -} - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(jsonString: string): SuggestedValues { - const json = JSON.parse(jsonString); - assert(json && typeof json === 'object'); - assert(Number.isInteger(json.last_executed_l1_batch_number)); - assert(Number.isInteger(json.nonce)); - assert(Number.isInteger(json.priority_fee)); - return { - lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), - nonce: json.nonce, - priorityFee: json.priority_fee - }; -} - function run(cmd: string, args: string[], options: child_process.SpawnOptions): child_process.SpawnSyncReturns { let res = child_process.spawnSync(cmd, args, options); expect(res.error).to.be.undefined; @@ -94,7 +49,7 @@ function compileBinaries() { // Fetches env vars for the given environment (like 'dev', 'ext-node'). // TODO: it would be better to import zk tool code directly. 
-function fetchEnv(zksyncEnv: string): any { +function fetchEnv(zksyncEnv: string): Record { let res = run('./bin/zk', ['f', 'env'], { cwd: process.env.ZKSYNC_HOME, env: { @@ -106,218 +61,62 @@ function fetchEnv(zksyncEnv: string): any { return { ...process.env, ...dotenv.parse(res.stdout) }; } -async function runBlockReverter(args: string[]): Promise { - let env = fetchEnv(mainEnv); - - let fileConfigFlags = ''; +/** Loads env profiles for the main and external nodes */ +function loadEnvs() { + let deploymentMode: string; if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ pathToHome, chain: fileConfig.chain }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } - - const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( - ' ' - )} ${fileConfigFlags}`; - const executedProcess = await exec(cmd, { - cwd: env.ZKSYNC_HOME, - env: { - ...env, - PATH: process.env.PATH - } - }); - - return executedProcess.stdout; -} - -async function killServerAndWaitForShutdown(proc: MainNode | ExtNode) { - await proc.terminate(); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await proc.tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; + const genesisConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'genesis.yaml' }); + deploymentMode = genesisConfig.deploymentMode; + } else { + deploymentMode = process.env.DEPLOYMENT_MODE ?? 'Rollup'; + if (!['Validium', 'Rollup'].includes(deploymentMode)) { + throw new Error(`Unknown deployment mode: ${deploymentMode}`); } } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} - -class MainNode { - constructor(public tester: Tester, public proc: ChildProcessWithoutNullStreams, public zkInception: boolean) {} - - public async terminate() { - try { - await killPidWithAllChilds(this.proc.pid!, 9); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Terminates all main node processes running. - // - // WARNING: This is not safe to use when running nodes on multiple chains. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_server'); - } catch (err) { - console.log(`ignored error: ${err}`); - } + console.log(`Using deployment mode: ${deploymentMode}`); + + let mainEnvName: string; + let extEnvName: string; + if (deploymentMode === 'Validium') { + mainEnvName = process.env.IN_DOCKER ? 'dev_validium_docker' : 'dev_validium'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-validium-docker' : 'ext-node-validium'; + } else { + // Rollup deployment mode + mainEnvName = process.env.IN_DOCKER ? 'docker' : 'dev'; + extEnvName = process.env.IN_DOCKER ? 'ext-node-docker' : 'ext-node'; } - // Spawns a main node. - // if enableConsensus is set, consensus component will be started in the main node. - // if enableExecute is NOT set, main node will NOT send L1 transactions to execute L1 batches. 
- public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - enableExecute: boolean, - ethClientWeb3Url: string, - apiWeb3JsonRpcHttpUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(mainEnv); - env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000'; - // Set full mode for the Merkle tree as it is required to get blocks committed. - env.DATABASE_MERKLE_TREE_MODE = 'full'; - - if (fileConfig.loadFromFile) { - replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000); - } - - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; - } - if (baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) { - components += ',base_token_ratio_persister'; - } - let proc = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Wait until the main node starts responding. - let tester: Tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - while (true) { - try { - console.log(`Web3 ${apiWeb3JsonRpcHttpUrl}`); - await tester.syncWallet.provider.getBridgehubContractAddress(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); - } - console.log('MainNode waiting for api endpoint'); - await utils.sleep(1); - } - } - return new MainNode(tester, proc, fileConfig.loadFromFile); - } -} - -class ExtNode { - constructor(public tester: Tester, private proc: child_process.ChildProcess, public zkInception: boolean) {} - - public async terminate() { - try { - await killPidWithAllChilds(this.proc.pid!, 9); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Terminates all main node processes running. - // - // WARNING: This is not safe to use when running nodes on multiple chains. - public static async terminateAll() { - try { - await utils.exec('killall -INT zksync_external_node'); - } catch (err) { - console.log(`ignored error: ${err}`); - } - } - - // Spawns an external node. - // If enableConsensus is set, the node will use consensus P2P network to fetch blocks. - public static async spawn( - logs: fs.WriteStream, - enableConsensus: boolean, - ethClientWeb3Url: string, - enEthClientUrl: string, - baseTokenAddress: string - ): Promise { - let env = fetchEnv(extEnv); - let args = []; - if (enableConsensus) { - args.push('--enable-consensus'); - } - - // Run server in background. - let proc = runExternalNodeInBackground({ - stdio: ['ignore', logs, logs], - cwd: pathToHome, - env: env, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Wait until the node starts responding. - let tester: Tester = await Tester.init(ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - while (true) { - try { - await tester.syncWallet.provider.getBlockNumber(); - break; - } catch (err) { - if (proc.exitCode != null) { - assert.fail(`node failed to start, exitCode = ${proc.exitCode}`); - } - console.log('ExtNode waiting for api endpoint'); - await utils.sleep(1); - } - } - return new ExtNode(tester, proc, fileConfig.loadFromFile); - } - - // Waits for the node process to exit. 
- public async waitForExit(): Promise { - while (this.proc.exitCode === null) { - await utils.sleep(1); - } - return this.proc.exitCode; - } + console.log(`Fetching main node env: ${mainEnvName}`); + const mainEnv = fetchEnv(mainEnvName); + console.log(`Fetching EN env: ${extEnvName}`); + const extEnv = fetchEnv(extEnvName); + return [mainEnv, extEnv]; } describe('Block reverting test', function () { - let ethClientWeb3Url: string; - let apiWeb3JsonRpcHttpUrl: string; - let baseTokenAddress: string; - let enEthClientUrl: string; let operatorAddress: string; - let mainLogs: fs.WriteStream; - let extLogs: fs.WriteStream; let depositAmount: bigint; - let enableConsensus: boolean; - let mainNode: MainNode; - let extNode: ExtNode; + let mainNodeSpawner: NodeSpawner; + let mainEnv: Record; + let mainNode: Node; + let extNodeSpawner: NodeSpawner; + let extNode: Node; + let mainContract: IZkSyncHyperchain; + let alice: zksync.Wallet; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; before('initialize test', async () => { + let ethClientWeb3Url: string; + let apiWeb3JsonRpcHttpUrl: string; + let baseTokenAddress: string; + let enEthClientUrl: string; + + let extEnv; + [mainEnv, extEnv] = loadEnvs(); + if (fileConfig.loadFromFile) { const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); @@ -336,223 +135,143 @@ describe('Block reverting test', function () { enEthClientUrl = externalNodeGeneralConfig.api.web3_json_rpc.http_url; operatorAddress = walletsConfig.operator.address; } else { - let env = fetchEnv(mainEnv); - ethClientWeb3Url = env.ETH_CLIENT_WEB3_URL; - apiWeb3JsonRpcHttpUrl = env.API_WEB3_JSON_RPC_HTTP_URL; - baseTokenAddress = env.CONTRACTS_BASE_TOKEN_ADDR; - enEthClientUrl = `http://127.0.0.1:${env.EN_HTTP_PORT}`; + ethClientWeb3Url = mainEnv.ETH_CLIENT_WEB3_URL!; + apiWeb3JsonRpcHttpUrl = mainEnv.API_WEB3_JSON_RPC_HTTP_URL!; + baseTokenAddress = mainEnv.CONTRACTS_BASE_TOKEN_ADDR!; + enEthClientUrl = `http://127.0.0.1:${extEnv.EN_HTTP_PORT!}`; // TODO use env variable for this? 
operatorAddress = '0xde03a0B5963f75f1C8485B355fF6D30f3093BDE7'; } - mainLogs = fs.createWriteStream(await logsPath('server.log'), { flags: 'a' }); - extLogs = fs.createWriteStream(await logsPath('external_node.log'), { flags: 'a' }); + + const pathToMainLogs = await logsPath('server.log'); + const mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing main node logs to ${pathToMainLogs}`); + + const pathToEnLogs = await logsPath('external_node.log'); + const extLogs = await fs.open(pathToEnLogs, 'a'); + console.log(`Writing EN logs to ${pathToEnLogs}`); + if (process.env.SKIP_COMPILATION !== 'true' && !fileConfig.loadFromFile) { compileBinaries(); } - enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + const enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; console.log(`enableConsensus = ${enableConsensus}`); depositAmount = ethers.parseEther('0.001'); - }); - - step('run', async () => { - if (autoKill) { - console.log('Make sure that nodes are not running'); - await ExtNode.terminateAll(); - await MainNode.terminateAll(); - } - console.log('Start main node'); - mainNode = await MainNode.spawn( - mainLogs, + const mainNodeSpawnOptions = { enableConsensus, - true, ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress - ); - console.log('Start ext node'); - extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); + }; + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, mainNodeSpawnOptions, mainEnv); + const extNodeSpawnOptions = { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl: enEthClientUrl, + baseTokenAddress + }; + extNodeSpawner = new NodeSpawner(pathToHome, extLogs, fileConfig, extNodeSpawnOptions, extEnv); + }); + step('Make sure that nodes are not running', async () => { + if (autoKill) { + await Node.killAll(NodeType.MAIN); + await Node.killAll(NodeType.EXT); + } + }); + + step('Start main node', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); + + step('Start external node', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); + + step('Fund wallets', async () => { await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); await extNode.tester.fundSyncWallet(); + alice = extNode.tester.emptyWallet(); + }); - const main_contract = await mainNode.tester.syncWallet.getMainContract(); - const baseToken = await mainNode.tester.syncWallet.getBaseToken(); - const isETHBasedChain = baseToken === zksync.utils.ETH_ADDRESS_IN_CONTRACTS; - const alice: zksync.Wallet = extNode.tester.emptyWallet(); + step('Seal L1 batch', async () => { + depositL1BatchNumber = await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log( - 'Finalize an L1 transaction to ensure at least 1 executed L1 batch and that all transactions are processed' - ); + step('wait for L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - for (let iter = 0; iter < 30; iter++) { - try { - const h: zksync.types.PriorityOpResponse = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await h.waitFinalize(); - break; - } catch (error: any) { - if (error.message == 'server shutting down') { - await utils.sleep(2); - continue; - } - } - } + step('Restart main node with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - console.log('Restart the main node with L1 batch execution disabled.'); - await killServerAndWaitForShutdown(mainNode); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - false, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress - ); + // FIXME: need 2 batches? + step('seal another L1 batch', async () => { + await extNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log('Commit at least 2 L1 batches which are not executed'); - const lastExecuted = await main_contract.getTotalBatchesExecuted(); - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. - const initialL1BatchNumber = await main_contract.getTotalBatchesCommitted(); - const firstDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - await firstDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(0.1); - } + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - const secondDepositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await extNode.tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1n) { - await utils.sleep(0.3); - } + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); + }); - const alice2 = await alice.getBalance(); - while (true) { - const lastCommitted = await main_contract.getTotalBatchesCommitted(); - console.log(`lastExecuted = ${lastExecuted}, lastCommitted = ${lastCommitted}`); - if (lastCommitted - lastExecuted >= 2n) { - console.log('Terminate the main node'); - await killServerAndWaitForShutdown(mainNode); - break; - } - await utils.sleep(0.3); - } + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); - console.log('Ask block_reverter to suggest to which L1 batch we should revert'); - const values_json = await runBlockReverter([ - 'print-suggested-values', - '--json', - '--operator-address', - operatorAddress - ]); - console.log(`values = ${values_json}`); - const values = parseSuggestedValues(values_json); - assert(lastExecuted === values.lastExecutedL1BatchNumber); - - console.log('Send reverting transaction to L1'); - await runBlockReverter([ - 'send-eth-transaction', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--nonce', - values.nonce.toString(), - '--priority-fee-per-gas', - values.priorityFee.toString() - ]); - - console.log('Check that batches are reverted on L1'); - const lastCommitted2 = await main_contract.getTotalBatchesCommitted(); - console.log(`lastCommitted = ${lastCommitted2}, want ${lastExecuted}`); - assert(lastCommitted2 === lastExecuted); - - console.log('Rollback db'); - await runBlockReverter([ - 'rollback-db', - '--l1-batch-number', - values.lastExecutedL1BatchNumber.toString(), - '--rollback-postgres', - '--rollback-tree', - '--rollback-sk-cache', - '--rollback-vm-runners-cache' - ]); - - console.log('Start main node.'); - mainNode = await MainNode.spawn( - mainLogs, - enableConsensus, - true, - ethClientWeb3Url, - apiWeb3JsonRpcHttpUrl, - baseTokenAddress + step('revert batches', async () => { + await executeRevert( + pathToHome, + fileConfig.chain, + operatorAddress, + batchesCommittedBeforeRevert, + mainContract, + mainEnv ); + }); - console.log('Wait for the external node to detect reorg and terminate'); + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); + + step('Wait for EN to detect reorg and terminate', async () => { await extNode.waitForExit(); + }); - console.log('Restart external node and wait for it to revert.'); - extNode = await ExtNode.spawn(extLogs, enableConsensus, ethClientWeb3Url, enEthClientUrl, baseTokenAddress); - - console.log('Execute an L1 transaction'); - const depositHandle = await extNode.tester.syncWallet.deposit({ - token: isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : baseToken, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); - await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - } + step('Restart EN', async () => { + extNode = await extNodeSpawner.spawnExtNode(); + }); - // TODO: it would be nice to know WHY it "doesn't work well with block reversions" and what it actually means. - console.log( - "ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`." - ); - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - while (true) { - receipt = await extNode.tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - if (receipt != null) { - break; - } + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); } - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - - // The reverted transactions are expected to be reexecuted before the next transaction is applied. - // Hence we compare the state against the alice2, rather than against alice3. - const alice4want = alice2 + depositAmount; - const alice4 = await alice.getBalance(); - console.log(`Alice's balance is ${alice4}, want ${alice4want}`); - assert(alice4 === alice4want); - - console.log('Execute an L2 transaction'); - await checkedRandomTransfer(alice, 1n); + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); + + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(extNode.tester, alice, depositAmount); + const balanceAfter = await alice.getBalance(); + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); + }); + + step('check random transfer', async () => { + await checkRandomTransfer(alice, 1n); }); after('terminate nodes', async () => { @@ -564,25 +283,3 @@ describe('Block reverting test', function () { } }); }); - -// Transfers amount from sender to a random wallet in an L2 transaction. -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiver = zksync.Wallet.createRandom().connect(sender.provider); - const transferHandle = await sender.sendTransaction({ to: receiver.address, value: amount, type: 0 }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. - let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt === null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.provider!.getBalance(receiver.address); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! 
+ amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/revert-and-restart.test.ts b/core/tests/revert-test/tests/revert-and-restart.test.ts index a01788284d2a..163a7294b5f6 100644 --- a/core/tests/revert-test/tests/revert-and-restart.test.ts +++ b/core/tests/revert-test/tests/revert-and-restart.test.ts @@ -1,104 +1,52 @@ import * as utils from 'utils'; -import { loadConfig, shouldLoadConfigFromFile, getAllConfigsPath } from 'utils/build/file-configs'; -import { runServerInBackground } from './utils'; -import { Tester } from './tester'; +import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { + checkRandomTransfer, + executeDepositAfterRevert, + executeRevert, + Node, + NodeSpawner, + NodeType, + waitToCommitBatchesWithoutExecution, + waitToExecuteBatch +} from './utils'; import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; -import { expect } from 'chai'; +import { assert } from 'chai'; import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; import path from 'path'; -import { ChildProcessWithoutNullStreams } from 'child_process'; import fs from 'node:fs/promises'; import { logsTestPath } from 'utils/build/logs'; -import { killPidWithAllChilds } from 'utils/build/kill'; - -// Parses output of "print-suggested-values" command of the revert block tool. -function parseSuggestedValues(suggestedValuesString: string): { - lastL1BatchNumber: bigint; - nonce: bigint; - priorityFee: bigint; -} { - const json = JSON.parse(suggestedValuesString); - if (!json || typeof json !== 'object') { - throw new TypeError('suggested values are not an object'); - } - - const lastL1BatchNumber = json.last_executed_l1_batch_number; - if (!Number.isInteger(lastL1BatchNumber)) { - throw new TypeError('suggested `lastL1BatchNumber` is not an integer'); - } - const nonce = json.nonce; - if (!Number.isInteger(nonce)) { - throw new TypeError('suggested `nonce` is not an integer'); - } - const priorityFee = json.priority_fee; - if (!Number.isInteger(priorityFee)) { - throw new TypeError('suggested `priorityFee` is not an integer'); - } - - return { - lastL1BatchNumber: BigInt(lastL1BatchNumber), - nonce: BigInt(nonce), - priorityFee: BigInt(priorityFee) - }; -} - -async function killServerAndWaitForShutdown(tester: Tester, serverProcess?: ChildProcessWithoutNullStreams) { - if (!serverProcess) { - await utils.exec('killall -9 zksync_server').catch(ignoreError); - return; - } - await killPidWithAllChilds(serverProcess.pid!, 9).catch(ignoreError); - // Wait until it's really stopped. - let iter = 0; - while (iter < 30) { - try { - await tester.syncWallet.provider.getBlockNumber(); - await utils.sleep(2); - iter += 1; - } catch (_) { - // When exception happens, we assume that server died. - return; - } - } - // It's going to panic anyway, since the server is a singleton entity, so better to exit early. - throw new Error("Server didn't stop after a kill request"); -} function ignoreError(_err: any, context?: string) { const message = context ? 
`Error ignored (context: ${context}).` : 'Error ignored.'; console.info(message); } -const fileConfig = shouldLoadConfigFromFile(); -const depositAmount = ethers.parseEther('0.001'); - -async function logsPath(name: string): Promise { - return await logsTestPath(fileConfig.chain, 'logs/revert/', name); -} - describe('Block reverting test', function () { - let tester: Tester; let alice: zksync.Wallet; let mainContract: IZkSyncHyperchain; - let blocksCommittedBeforeRevert: bigint; - let logs: fs.FileHandle; + let depositL1BatchNumber: number; + let batchesCommittedBeforeRevert: bigint; + let mainLogs: fs.FileHandle; let operatorAddress: string; + let baseTokenAddress: string; let ethClientWeb3Url: string; let apiWeb3JsonRpcHttpUrl: string; - let serverProcess: ChildProcessWithoutNullStreams | undefined; + let mainNodeSpawner: NodeSpawner; + let mainNode: Node; + + const fileConfig = shouldLoadConfigFromFile(); const pathToHome = path.join(__dirname, '../../../..'); const autoKill: boolean = !fileConfig.loadFromFile || !process.env.NO_KILL; const enableConsensus = process.env.ENABLE_CONSENSUS == 'true'; - let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads'; - if (enableConsensus) { - components += ',consensus'; + const depositAmount = ethers.parseEther('0.001'); + + async function logsPath(name: string): Promise { + return await logsTestPath(fileConfig.chain, 'logs/revert/', name); } before('initialize test', async () => { - // Clone file configs if necessary - let baseTokenAddress: string; - if (!fileConfig.loadFromFile) { operatorAddress = process.env.ETH_SENDER_SENDER_OPERATOR_COMMIT_ETH_ADDR!; ethClientWeb3Url = process.env.ETH_CLIENT_WEB3_URL!; @@ -132,198 +80,107 @@ describe('Block reverting test', function () { baseTokenAddress = contractsConfig.l1.base_token_addr; } - // Create test wallets - tester = await Tester.init(ethClientWeb3Url, apiWeb3JsonRpcHttpUrl, baseTokenAddress); - alice = tester.emptyWallet(); + const pathToMainLogs = await logsPath('server.log'); + mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing server logs to ${pathToMainLogs}`); + + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + }); }); - step('run server and execute some transactions', async () => { + step('Make sure that the server is not running', async () => { if (autoKill) { // Make sure server isn't running. - await killServerAndWaitForShutdown(tester); - } - - // Run server in background. - logs = await fs.open(await logsPath('server.log'), 'a'); - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - - // Server may need some time to recompile if it's a cold run, so wait for it. - let iter = 0; - while (iter < 30 && !mainContract) { - try { - mainContract = await tester.syncWallet.getMainContract(); - } catch (err) { - ignoreError(err, 'waiting for server HTTP JSON-RPC to start'); - await utils.sleep(2); - iter += 1; - } - } - if (!mainContract) { - throw new Error('Server did not start'); - } - - await tester.fundSyncWallet(); - - // Seal 2 L1 batches. - // One is not enough to test the reversion of sk cache because - // it gets updated with some batch logs only at the start of the next batch. 
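// The deleted inline flow below is replaced by explicit steps further down, e.g.:
//   depositL1BatchNumber = await mainNode.createBatchWithDeposit(alice.address, depositAmount);
//   await waitToExecuteBatch(mainContract, depositL1BatchNumber);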
- const initialL1BatchNumber = await tester.web3Provider.getL1BatchNumber(); - const firstDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await firstDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber) { - await utils.sleep(1); + await Node.killAll(NodeType.MAIN); } - const secondDepositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); - await secondDepositHandle.wait(); - while ((await tester.web3Provider.getL1BatchNumber()) <= initialL1BatchNumber + 1) { - await utils.sleep(1); - } - - const balance = await alice.getBalance(); - expect(balance === depositAmount * 2n, 'Incorrect balance after deposits').to.be.true; + }); - // Check L1 committed and executed blocks. - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - let blocksExecuted = await mainContract.getTotalBatchesExecuted(); - let tryCount = 0; - while (blocksCommitted === blocksExecuted && tryCount < 100) { - blocksCommitted = await mainContract.getTotalBatchesCommitted(); - blocksExecuted = await mainContract.getTotalBatchesExecuted(); - tryCount += 1; - await utils.sleep(1); - } - expect(blocksCommitted > blocksExecuted, 'There is no committed but not executed block').to.be.true; - blocksCommittedBeforeRevert = blocksCommitted; + step('start server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - // Stop server. - await killServerAndWaitForShutdown(tester, serverProcess!); + step('fund wallet', async () => { + await mainNode.tester.fundSyncWallet(); + mainContract = await mainNode.tester.syncWallet.getMainContract(); + alice = mainNode.tester.emptyWallet(); }); - step('revert blocks', async () => { - let fileConfigFlags = ''; - if (fileConfig.loadFromFile) { - const configPaths = getAllConfigsPath({ - pathToHome, - chain: fileConfig.chain - }); - fileConfigFlags = ` - --config-path=${configPaths['general.yaml']} - --contracts-config-path=${configPaths['contracts.yaml']} - --secrets-path=${configPaths['secrets.yaml']} - --wallets-path=${configPaths['wallets.yaml']} - --genesis-path=${configPaths['genesis.yaml']} - `; - } + // Seal 2 L1 batches. + // One is not enough to test the reversion of sk cache because + // it gets updated with some batch logs only at the start of the next batch. 
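// ("sk cache" = the state keeper cache; it is one of the stores rolled back via the
// block reverter's --rollback-sk-cache flag, hence the need for a second sealed batch.)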
+ step('seal L1 batch', async () => { + depositL1BatchNumber = await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - const executedProcess = await utils.exec( - `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- print-suggested-values --json --operator-address ${operatorAddress} ${fileConfigFlags}` - // ^ Switch off logs to not pollute the output JSON - ); - const suggestedValuesOutput = executedProcess.stdout; - const { lastL1BatchNumber, nonce, priorityFee } = parseSuggestedValues(suggestedValuesOutput); - expect(lastL1BatchNumber < blocksCommittedBeforeRevert, 'There should be at least one block for revert').to.be - .true; + step('wait for an L1 batch to get executed', async () => { + await waitToExecuteBatch(mainContract, depositL1BatchNumber); + }); - console.log( - `Reverting with parameters: last unreverted L1 batch number: ${lastL1BatchNumber}, nonce: ${nonce}, priorityFee: ${priorityFee}` - ); + step('restart server with batch execution turned off', async () => { + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(false); + }); - console.log('Sending ETH transaction..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- send-eth-transaction --l1-batch-number ${lastL1BatchNumber} --nonce ${nonce} --priority-fee-per-gas ${priorityFee} ${fileConfigFlags}` - ); + step('seal another L1 batch', async () => { + await mainNode.createBatchWithDeposit(alice.address, depositAmount); + }); - console.log('Rolling back DB..'); - await utils.spawn( - `cd ${pathToHome} && cargo run --bin block_reverter --release -- rollback-db --l1-batch-number ${lastL1BatchNumber} --rollback-postgres --rollback-tree --rollback-sk-cache --rollback-vm-runners-cache ${fileConfigFlags}` - ); + step('check wallet balance', async () => { + const balance = await alice.getBalance(); + console.log(`Balance before revert: ${balance}`); + assert(balance === depositAmount * 2n, 'Incorrect balance after deposits'); + }); - let blocksCommitted = await mainContract.getTotalBatchesCommitted(); - expect(blocksCommitted === lastL1BatchNumber, 'Revert on contract was unsuccessful').to.be.true; + step('wait for the new batch to be committed', async () => { + batchesCommittedBeforeRevert = await waitToCommitBatchesWithoutExecution(mainContract); }); - step('execute transaction after revert', async () => { - // Run server. - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - await utils.sleep(30); + step('stop server', async () => { + await mainNode.killAndWaitForShutdown(); + }); - const balanceBefore = await alice.getBalance(); - expect(balanceBefore === depositAmount * 2n, 'Incorrect balance after revert').to.be.true; + step('revert batches', async () => { + await executeRevert(pathToHome, fileConfig.chain, operatorAddress, batchesCommittedBeforeRevert, mainContract); + }); - // Execute a transaction - const depositHandle = await tester.syncWallet.deposit({ - token: tester.isETHBasedChain ? 
zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, - amount: depositAmount, - to: alice.address, - approveBaseERC20: true, - approveERC20: true - }); + step('restart server', async () => { + mainNode = await mainNodeSpawner.spawnMainNode(true); + }); - let l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); - while (!l1TxResponse) { - console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + step('wait until last deposit is re-executed', async () => { + let balanceBefore; + let tryCount = 0; + while ((balanceBefore = await alice.getBalance()) !== 2n * depositAmount && tryCount < 30) { + console.log(`Balance after revert: ${balanceBefore}`); + tryCount++; await utils.sleep(1); - l1TxResponse = await alice._providerL1().getTransaction(depositHandle.hash); } + assert(balanceBefore === 2n * depositAmount, 'Incorrect balance after revert'); + }); - // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`. - const l2Tx = await alice._providerL2().getL2TransactionFromPriorityOp(l1TxResponse); - let receipt = null; - do { - receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash); - await utils.sleep(1); - } while (receipt == null); - - await depositHandle.waitFinalize(); - expect(receipt.status).to.be.eql(1); - + step('execute transaction after revert', async () => { + await executeDepositAfterRevert(mainNode.tester, alice, depositAmount); const balanceAfter = await alice.getBalance(); - expect(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit').to.be.true; + console.log(`Balance after another deposit: ${balanceAfter}`); + assert(balanceAfter === depositAmount * 3n, 'Incorrect balance after another deposit'); }); step('execute transactions after simple restart', async () => { // Execute an L2 transaction - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); // Stop server. - await killServerAndWaitForShutdown(tester, serverProcess!); + await mainNode.killAndWaitForShutdown(); // Run again. - serverProcess = runServerInBackground({ - components: [components], - stdio: ['ignore', logs, logs], - cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, - chain: fileConfig.chain - }); - await utils.sleep(30); + mainNode = await mainNodeSpawner.spawnMainNode(true); // Trying to send a transaction from the same address again - await checkedRandomTransfer(alice, 1n); + await checkRandomTransfer(alice, 1n); }); after('Try killing server', async () => { @@ -332,29 +189,3 @@ describe('Block reverting test', function () { } }); }); - -async function checkedRandomTransfer(sender: zksync.Wallet, amount: bigint) { - const senderBalanceBefore = await sender.getBalance(); - const receiverHD = zksync.Wallet.createRandom(); - const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider); - const transferHandle = await sender.sendTransaction({ - to: receiver.address, - value: amount, - type: 0 - }); - - // ethers doesn't work well with block reversions, so we poll for the receipt manually. 
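// (Presumably the issue is that ethers caches block numbers, which can move
// backwards during a revert, so the helpers poll getTransactionReceipt manually.)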
- let txReceipt = null; - do { - txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash); - await utils.sleep(1); - } while (txReceipt == null); - - const senderBalance = await sender.getBalance(); - const receiverBalance = await receiver.getBalance(); - - expect(receiverBalance === amount, 'Failed updated the balance of the receiver').to.be.true; - - const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount; - expect(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender').to.be.true; -} diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index 4e3e292da654..ea8a45b97c37 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -1,5 +1,13 @@ import { exec as _exec, spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process'; import { promisify } from 'util'; +import { assert, expect } from 'chai'; +import { FileConfig, getAllConfigsPath, replaceAggregatedBlockExecuteDeadline } from 'utils/build/file-configs'; +import { IZkSyncHyperchain } from 'zksync-ethers/build/typechain'; +import { Tester } from './tester'; +import { killPidWithAllChilds } from 'utils/build/kill'; +import * as utils from 'utils'; +import fs from 'node:fs/promises'; +import * as zksync from 'zksync-ethers'; // executes a command in background and returns a child process handle // by default pipes data to parent's stdio but this can be overridden @@ -100,3 +108,389 @@ export function exec(command: string, options: ProcessEnvOptions) { command = command.replace(/\n/g, ' '); return promisified(command, options); } + +export interface SuggestedValues { + lastExecutedL1BatchNumber: bigint; + nonce: number; + priorityFee: number; +} + +/** Parses output of "print-suggested-values" command of the revert block tool. */ +export function parseSuggestedValues(jsonString: string): SuggestedValues { + const json = JSON.parse(jsonString); + assert(json && typeof json === 'object'); + assert(Number.isInteger(json.last_executed_l1_batch_number)); + assert(Number.isInteger(json.nonce)); + assert(Number.isInteger(json.priority_fee)); + return { + lastExecutedL1BatchNumber: BigInt(json.last_executed_l1_batch_number), + nonce: json.nonce, + priorityFee: json.priority_fee + }; +} + +async function runBlockReverter( + pathToHome: string, + chain: string | undefined, + env: ProcessEnvOptions['env'] | undefined, + args: string[] +): Promise { + let fileConfigFlags = ''; + if (chain) { + const configPaths = getAllConfigsPath({ pathToHome, chain }); + fileConfigFlags = ` + --config-path=${configPaths['general.yaml']} + --contracts-config-path=${configPaths['contracts.yaml']} + --secrets-path=${configPaths['secrets.yaml']} + --wallets-path=${configPaths['wallets.yaml']} + --genesis-path=${configPaths['genesis.yaml']} + `; + } + + const cmd = `cd ${pathToHome} && RUST_LOG=off cargo run --bin block_reverter --release -- ${args.join( + ' ' + )} ${fileConfigFlags}`; + + const options = env + ? 
{
+              cwd: env.ZKSYNC_HOME,
+              env: {
+                  ...env,
+                  PATH: process.env.PATH
+              }
+          }
+        : {};
+    const executedProcess = await exec(cmd, options);
+    return executedProcess.stdout;
+}
+
+export async function executeRevert(
+    pathToHome: string,
+    chain: string | undefined,
+    operatorAddress: string,
+    batchesCommittedBeforeRevert: bigint,
+    mainContract: IZkSyncHyperchain,
+    env?: ProcessEnvOptions['env']
+) {
+    const suggestedValuesOutput = await runBlockReverter(pathToHome, chain, env, [
+        'print-suggested-values',
+        '--json',
+        '--operator-address',
+        operatorAddress
+    ]);
+    const values = parseSuggestedValues(suggestedValuesOutput);
+    assert(
+        values.lastExecutedL1BatchNumber < batchesCommittedBeforeRevert,
+        'There should be at least one block for revert'
+    );
+
+    console.log('Reverting with parameters', values);
+
+    console.log('Sending ETH transaction..');
+    await runBlockReverter(pathToHome, chain, env, [
+        'send-eth-transaction',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--nonce',
+        values.nonce.toString(),
+        '--priority-fee-per-gas',
+        values.priorityFee.toString()
+    ]);
+
+    console.log('Rolling back DB..');
+    await runBlockReverter(pathToHome, chain, env, [
+        'rollback-db',
+        '--l1-batch-number',
+        values.lastExecutedL1BatchNumber.toString(),
+        '--rollback-postgres',
+        '--rollback-tree',
+        '--rollback-sk-cache',
+        '--rollback-vm-runners-cache'
+    ]);
+
+    const blocksCommitted = await mainContract.getTotalBatchesCommitted();
+    assert(blocksCommitted === values.lastExecutedL1BatchNumber, 'Revert on contract was unsuccessful');
+}
+
+export interface MainNodeSpawnOptions {
+    enableConsensus: boolean;
+    ethClientWeb3Url: string;
+    apiWeb3JsonRpcHttpUrl: string;
+    baseTokenAddress: string;
+}
+
+export enum NodeType {
+    MAIN = 'zksync_server',
+    EXT = 'zksync_external_node'
+}
+
+export class Node<TYPE extends NodeType> {
+    constructor(
+        public readonly tester: Tester,
+        private readonly proc: ChildProcessWithoutNullStreams,
+        private readonly type: TYPE
+    ) {}
+
+    public async terminate() {
+        try {
+            await killPidWithAllChilds(this.proc.pid!, 9);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /**
+     * Terminates all node processes of the given type.
+     *
+     * WARNING: This is not safe to use when running nodes on multiple chains.
+     */
+    public static async killAll(type: NodeType) {
+        try {
+            await utils.exec(`killall -KILL ${type}`);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /** Waits for the node process to exit and returns its exit code. */
+    public async waitForExit(): Promise<number> {
+        while (this.proc.exitCode === null) {
+            await utils.sleep(1);
+        }
+        return this.proc.exitCode;
+    }
+
+    public async killAndWaitForShutdown() {
+        await this.terminate();
+        // Wait until it's really stopped.
+        let iter = 0;
+        while (iter < 30) {
+            try {
+                await this.tester.syncWallet.provider.getBlockNumber();
+                await utils.sleep(2);
+                iter += 1;
+            } catch (_) {
+                // When an exception happens, we assume that the server died.
+                return;
+            }
+        }
+        // It's going to panic anyway, since the server is a singleton entity, so better to exit early.
+        throw new Error(`${this.type} didn't stop after a kill request`);
+    }
+
+    public async createBatchWithDeposit(to: string, amount: bigint) {
+        const initialL1BatchNumber = await this.tester.web3Provider.getL1BatchNumber();
+        console.log(`Initial L1 batch: ${initialL1BatchNumber}`);
+
+        const depositHandle = await this.tester.syncWallet.deposit({
+            token: this.tester.isETHBasedChain ?
zksync.utils.LEGACY_ETH_ADDRESS : this.tester.baseTokenAddress,
+ amount,
+ to,
+ approveBaseERC20: true,
+ approveERC20: true
+ });
+
+ let depositBatchNumber;
+ while (!(depositBatchNumber = (await depositHandle.wait()).l1BatchNumber)) {
+ console.log('Deposit is not included in L1 batch; sleeping');
+ await utils.sleep(1);
+ }
+ console.log(`Deposit was included into L1 batch ${depositBatchNumber}`);
+ expect(depositBatchNumber).to.be.greaterThan(initialL1BatchNumber);
+ return depositBatchNumber;
+ }
+}
+
+export class NodeSpawner {
+ public constructor(
+ private readonly pathToHome: string,
+ private readonly logs: fs.FileHandle,
+ private readonly fileConfig: FileConfig,
+ private readonly options: MainNodeSpawnOptions,
+ private readonly env?: ProcessEnvOptions['env']
+ ) {}
+
+ public async spawnMainNode(enableExecute: boolean): Promise<Node<NodeType.MAIN>> {
+ const env = this.env ?? process.env;
+ env.ETH_SENDER_SENDER_AGGREGATED_BLOCK_EXECUTE_DEADLINE = enableExecute ? '1' : '10000';
+ // Set full mode for the Merkle tree as it is required to get blocks committed.
+ env.DATABASE_MERKLE_TREE_MODE = 'full';
+
+ const { fileConfig, pathToHome, options, logs } = this;
+
+ if (fileConfig.loadFromFile) {
+ replaceAggregatedBlockExecuteDeadline(pathToHome, fileConfig, enableExecute ? 1 : 10000);
+ }
+
+ let components = 'api,tree,eth,state_keeper,commitment_generator,da_dispatcher,vm_runner_protective_reads';
+ if (options.enableConsensus) {
+ components += ',consensus';
+ }
+ if (options.baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) {
+ components += ',base_token_ratio_persister';
+ }
+ let proc = runServerInBackground({
+ components: [components],
+ stdio: ['ignore', logs, logs],
+ cwd: pathToHome,
+ env: env,
+ useZkInception: fileConfig.loadFromFile,
+ chain: fileConfig.chain
+ });
+
+ // Wait until the main node starts responding.
+ const tester = await Tester.init(
+ options.ethClientWeb3Url,
+ options.apiWeb3JsonRpcHttpUrl,
+ options.baseTokenAddress
+ );
+ await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl);
+ return new Node(tester, proc, NodeType.MAIN);
+ }
+
+ public async spawnExtNode(): Promise<Node<NodeType.EXT>> {
+ const env = this.env ?? process.env;
+ const { pathToHome, fileConfig, logs, options } = this;
+
+ let args = []; // FIXME: unused
+ if (options.enableConsensus) {
+ args.push('--enable-consensus');
+ }
+
+ // Run server in background. 
+ let proc = runExternalNodeInBackground({ + stdio: ['ignore', logs, logs], + cwd: pathToHome, + env, + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain + }); + + const tester = await Tester.init( + options.ethClientWeb3Url, + options.apiWeb3JsonRpcHttpUrl, + options.baseTokenAddress + ); + await waitForNodeToStart(tester, proc, options.apiWeb3JsonRpcHttpUrl); + return new Node(tester, proc, NodeType.EXT); + } +} + +async function waitForNodeToStart(tester: Tester, proc: ChildProcessWithoutNullStreams, l2Url: string) { + while (true) { + try { + const blockNumber = await tester.syncWallet.provider.getBlockNumber(); + console.log(`Initialized node API on ${l2Url}; latest block: ${blockNumber}`); + break; + } catch (err) { + if (proc.exitCode != null) { + assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); + } + console.log(`Node waiting for API on ${l2Url}`); + await utils.sleep(1); + } + } +} + +export async function waitToExecuteBatch(mainContract: IZkSyncHyperchain, latestBatch: number) { + let tryCount = 0; + const initialExecutedBatch = await mainContract.getTotalBatchesExecuted(); + console.log(`Initial executed L1 batch: ${initialExecutedBatch}`); + + if (initialExecutedBatch >= latestBatch) { + console.log('Latest batch is executed; no need to wait'); + return; + } + + let lastExecutedBatch; + while ( + (lastExecutedBatch = await mainContract.getTotalBatchesExecuted()) === initialExecutedBatch && + tryCount < 100 + ) { + console.log(`Last executed batch: ${lastExecutedBatch}`); + tryCount++; + await utils.sleep(1); + } + assert(lastExecutedBatch > initialExecutedBatch); +} + +export async function waitToCommitBatchesWithoutExecution(mainContract: IZkSyncHyperchain): Promise { + let batchesCommitted = await mainContract.getTotalBatchesCommitted(); + let batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + + let tryCount = 0; + while ((batchesExecuted === 0n || batchesCommitted === batchesExecuted) && tryCount < 100) { + await utils.sleep(1); + batchesCommitted = await mainContract.getTotalBatchesCommitted(); + batchesExecuted = await mainContract.getTotalBatchesExecuted(); + console.log(`Batches committed: ${batchesCommitted}, executed: ${batchesExecuted}`); + tryCount += 1; + } + expect(batchesCommitted > batchesExecuted, 'There is no committed but not executed batch').to.be.true; + return batchesCommitted; +} + +export async function executeDepositAfterRevert(tester: Tester, wallet: zksync.Wallet, amount: bigint) { + const depositHandle = await tester.syncWallet.deposit({ + token: tester.isETHBasedChain ? zksync.utils.LEGACY_ETH_ADDRESS : tester.baseTokenAddress, + amount, + to: wallet.address, + approveBaseERC20: true, + approveERC20: true + }); + + let l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + while (!l1TxResponse) { + console.log(`Deposit ${depositHandle.hash} is not visible to the L1 network; sleeping`); + await utils.sleep(1); + l1TxResponse = await wallet._providerL1().getTransaction(depositHandle.hash); + } + console.log(`Got L1 deposit tx`, l1TxResponse); + + // ethers doesn't work well with block reversions, so wait for the receipt before calling `.waitFinalize()`. 
+ const l2Tx = await wallet._providerL2().getL2TransactionFromPriorityOp(l1TxResponse);
+ let receipt = null;
+ while (receipt === null) {
+ console.log(`L2 deposit transaction ${l2Tx.hash} is not confirmed; sleeping`);
+ await utils.sleep(1);
+ receipt = await tester.syncWallet.provider.getTransactionReceipt(l2Tx.hash);
+ }
+ expect(receipt.status).to.be.eql(1);
+ console.log(`L2 deposit transaction ${l2Tx.hash} is confirmed`);
+
+ await depositHandle.waitFinalize();
+ console.log('New deposit is finalized');
+}
+
+export async function checkRandomTransfer(sender: zksync.Wallet, amount: bigint) {
+ const senderBalanceBefore = await sender.getBalance();
+ console.log(`Sender's balance before transfer: ${senderBalanceBefore}`);
+
+ const receiverHD = zksync.Wallet.createRandom();
+ const receiver = new zksync.Wallet(receiverHD.privateKey, sender.provider);
+ const transferHandle = await sender.sendTransaction({
+ to: receiver.address,
+ value: amount,
+ type: 0
+ });
+
+ // ethers doesn't work well with block reversions, so we poll for the receipt manually.
+ let txReceipt = null;
+ while (txReceipt === null) {
+ console.log(`Transfer ${transferHandle.hash} is not confirmed; sleeping`);
+ await utils.sleep(1);
+ txReceipt = await sender.provider.getTransactionReceipt(transferHandle.hash);
+ }
+
+ const senderBalance = await sender.getBalance();
+ console.log(`Sender's balance after transfer: ${senderBalance}`);
+ const receiverBalance = await receiver.getBalance();
+ console.log(`Receiver's balance after transfer: ${receiverBalance}`);
+
+ assert(receiverBalance === amount, 'Failed to update the balance of the receiver');
+
+ const spentAmount = txReceipt.gasUsed * transferHandle.gasPrice! + amount;
+ console.log(`Expected spent amount: ${spentAmount}`);
+ assert(senderBalance + spentAmount >= senderBalanceBefore, 'Failed to update the balance of the sender');
+}
diff --git a/etc/utils/src/file-configs.ts b/etc/utils/src/file-configs.ts
index fad72901d15d..374bf53f6be9 100644
--- a/etc/utils/src/file-configs.ts
+++ b/etc/utils/src/file-configs.ts
@@ -2,18 +2,11 @@ import * as path from 'path';
 import * as fs from 'fs';
 import * as yaml from 'yaml';
 
-export function shouldLoadConfigFromFile() {
+export type FileConfig = { loadFromFile: false; chain?: undefined } | { loadFromFile: true; chain: string };
+
+export function shouldLoadConfigFromFile(): FileConfig {
 const chain = process.env.CHAIN_NAME;
- if (chain) {
- return {
- loadFromFile: true,
- chain
- } as const;
- } else {
- return {
- loadFromFile: false
- } as const;
- }
+ return chain ? { loadFromFile: true, chain } : { loadFromFile: false };
 }
 
 export const configNames = [
diff --git a/etc/utils/src/logs.ts b/etc/utils/src/logs.ts
index cdb26f5ad1b7..7db54ef8600c 100644
--- a/etc/utils/src/logs.ts
+++ b/etc/utils/src/logs.ts
@@ -1,7 +1,7 @@
 import path from 'path';
 import fs from 'node:fs/promises';
 
-const pathToHome = path.join(__dirname, '../../../..');
+const pathToHome = path.join(__dirname, '../../..');
 
 export async function logsTestPath(chain: string | undefined, relativePath: string, name: string): Promise<string> {
 chain = chain ? chain! 
: 'default'; From fe0867732f65459d366ffa029e87b17482574117 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 10 Sep 2024 15:35:52 +0300 Subject: [PATCH 046/116] test: Fix "missing revert data" error; fix / debug integration tests (#2804) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Fixes the "missing revert data" error by updating the used reth Docker image. The error is probably caused by [this issue](https://github.com/paradigmxyz/reth/issues/7381) fixed in the new reth versions. - Removes "web3 API compatibility tests › Should check API returns error when there are too many logs in eth_getLogs" test as fundamentally flaky and able to poison other tests. - Adds logging for upgrade test to investigate L1 "nonce too low" errors. ## Why ❔ Flaky CI bad. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- core/bin/zksync_server/src/node_builder.rs | 1 + .../ts-integration/tests/api/web3.test.ts | 58 ++++--------------- core/tests/upgrade-test/tests/upgrade.test.ts | 34 ++++++----- docker-compose-cpu-runner.yml | 2 +- docker-compose-gpu-runner-cuda-12-0.yml | 2 +- docker-compose-gpu-runner.yml | 2 +- docker-compose.yml | 2 +- 7 files changed, 35 insertions(+), 66 deletions(-) diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index 36ee7d990cf9..e2a0c5846b5d 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -364,6 +364,7 @@ impl MainNodeBuilder { subscriptions_limit: Some(rpc_config.subscriptions_limit()), batch_request_size_limit: Some(rpc_config.max_batch_request_size()), response_body_size_limit: Some(rpc_config.max_response_body_size()), + with_extended_tracing: rpc_config.extended_api_tracing, ..Default::default() }; self.node.add_layer(Web3ServerLayer::http( diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index b20e9d1e37d3..79789e744471 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -202,7 +202,7 @@ describe('web3 API compatibility tests', () => { test('Should test web3 response extensions', async () => { if (testMaster.isFastMode()) { - // This test requires a new L1 batch to be created, which may be very time consuming on stage. + // This test requires a new L1 batch to be created, which may be very time-consuming on stage. return; } @@ -333,7 +333,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notifier is not reactive + tests are being run in parallel, so we can't expect that the next block // would be expected one. Instead, we just want to receive an event with the particular block number. - wsProvider.on('block', (block) => { + await wsProvider.on('block', (block) => { if (block >= currentBlock) { newBlock = block; } @@ -355,7 +355,6 @@ describe('web3 API compatibility tests', () => { // ...though the gap should not be *too* big. expect(newBlock).toBeLessThan(currentBlock + 100); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -368,7 +367,7 @@ describe('web3 API compatibility tests', () => { let newTxHash: string | null = null; // We can't use `once` as there may be other pending txs sent together with our one. 
- wsProvider.on('pending', async (txHash) => { + await wsProvider.on('pending', async (txHash) => { const tx = await alice.provider.getTransaction(txHash); // We're waiting for the exact transaction to appear. if (!tx || tx.to != uniqueRecipient) { @@ -392,7 +391,6 @@ describe('web3 API compatibility tests', () => { expect(newTxHash as string).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -404,7 +402,7 @@ describe('web3 API compatibility tests', () => { // We're sending a few transfers from the wallet, so we'll use a new account to make event unique. let uniqueRecipient = testMaster.newEmptyAccount().address; - // Setup a filter for an ERC20 transfer. + // Set up a filter for an ERC20 transfer. const erc20TransferTopic = ethers.id('Transfer(address,address,uint256)'); let filter = { address: l2Token, @@ -414,15 +412,15 @@ describe('web3 API compatibility tests', () => { ethers.zeroPadValue(uniqueRecipient, 32) // Recipient ] }; - wsProvider.once(filter, (event) => { + await wsProvider.once(filter, (event) => { newEvent = event; }); - // Setup a filter that should not match anything. + // Set up a filter that should not match anything. let incorrectFilter = { address: alice.address }; - wsProvider.once(incorrectFilter, (_) => { + await wsProvider.once(incorrectFilter, (_) => { expect(null).fail('Found log for incorrect filter'); }); @@ -439,7 +437,6 @@ describe('web3 API compatibility tests', () => { expect((newEvent as any).transactionHash).toEqual(tx.hash); await tx.wait(); // To not leave a hanging promise. - wsProvider.removeAllListeners(); await wsProvider.destroy(); }); @@ -608,7 +605,7 @@ describe('web3 API compatibility tests', () => { // Pubsub notify is not reactive and may be laggy, so we want to increase the chances // for test to pass. So we try to sleep a few iterations until we receive expected amount - // of events. If we won't receive them, we continue and the test will fail anyway. + // of events. If we don't receive them, we continue and the test will fail anyway. const expectedTrivialEventsCount = 2; const expectedSimpleEventsCount = 2; const expectedIndexedEventsCount = 1; @@ -681,42 +678,9 @@ describe('web3 API compatibility tests', () => { ).resolves.toHaveProperty('result', expect.stringMatching(HEX_VALUE_REGEX)); }); - test('Should check API returns error when there are too many logs in eth_getLogs', async () => { - const contract = await deployContract(alice, contracts.events, []); - const maxLogsLimit = testMaster.environment().maxLogsLimit; - - // Send 3 transactions that emit `maxLogsLimit / 2` events. - const tx1 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx1Receipt = await tx1.wait(); - - const tx2 = await contract.emitManyEvents(maxLogsLimit / 2); - await tx2.wait(); - - const tx3 = await contract.emitManyEvents(maxLogsLimit / 2); - const tx3Receipt = await tx3.wait(); - - // There are around `0.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx1Receipt.blockNumber] range, - // so query with such filter should succeed. - await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx1Receipt.blockNumber - }) - ).resolves; - - // There are at least `1.5 * maxLogsLimit` logs in [tx1Receipt.blockNumber, tx3Receipt.blockNumber] range, - // so query with such filter should fail. 
- await expect( - alice.provider.getLogs({ - fromBlock: tx1Receipt.blockNumber, - toBlock: tx3Receipt.blockNumber - }) - ).rejects.toThrow(`Query returned more than ${maxLogsLimit} results.`); - }); - test('Should throw error for estimate gas for account with balance < tx.value', async () => { let poorBob = testMaster.newEmptyAccount(); - expect( + await expect( poorBob.estimateGas({ value: 1, to: alice.address }) ).toBeRejected(/*'insufficient balance for transfer'*/); }); @@ -860,7 +824,7 @@ describe('web3 API compatibility tests', () => { const getLogsByHash = (await alice.provider.getLogs({ blockHash: latestBlock.hash || undefined })).map((x) => { return new zksync.types.Log({ ...x, l1BatchNumber: 0 }, alice.provider); // Set bogus value. }); - await expect(getLogsByNumber).toEqual(getLogsByHash); + expect(getLogsByNumber).toEqual(getLogsByHash); // Check that incorrect queries are rejected. await expect( @@ -1030,7 +994,7 @@ describe('web3 API compatibility tests', () => { const incrementFunctionData = contract2.interface.encodeFunctionData('increment', [1]); // Assert that the estimation fails because the increment function is not present in contract1 - expect( + await expect( alice.provider.estimateGas({ to: contract1Address.toString(), data: incrementFunctionData diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index ffa28e4f1099..0f70e751b844 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -280,9 +280,11 @@ describe('Upgrade test', function () { ); executeOperation = chainUpgradeCalldata; + console.log('Sending scheduleTransparentOperation'); await sendGovernanceOperation(stmUpgradeData.scheduleTransparentOperation); + console.log('Sending executeOperation'); await sendGovernanceOperation(stmUpgradeData.executeOperation); - + console.log('Sending chain admin operation'); await sendChainAdminOperation(setTimestampCalldata); // Wait for server to process L1 event. 
@@ -371,23 +373,25 @@ describe('Upgrade test', function () { }); async function sendGovernanceOperation(data: string) { - await ( - await ecosystemGovWallet.sendTransaction({ - to: await governanceContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await ecosystemGovWallet.sendTransaction({ + to: await governanceContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent governance operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Governance operation succeeded, tx_hash=${transaction.hash}`); } async function sendChainAdminOperation(data: string) { - await ( - await adminGovWallet.sendTransaction({ - to: await chainAdminContract.getAddress(), - data: data, - type: 0 - }) - ).wait(); + const transaction = await adminGovWallet.sendTransaction({ + to: await chainAdminContract.getAddress(), + data: data, + type: 0 + }); + console.log(`Sent chain admin operation, tx_hash=${transaction.hash}, nonce=${transaction.nonce}`); + await transaction.wait(); + console.log(`Chain admin operation succeeded, tx_hash=${transaction.hash}`); } }); diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index 08d01390d770..beb54f3ade98 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 92a7b0b00887..35a0faeb9620 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index bbd61715842d..f95ae0d5f544 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" volumes: - type: bind source: ./volumes/reth/data diff --git a/docker-compose.yml b/docker-compose.yml index 7751c99d68a7..1e3a273ec9a4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,7 +2,7 @@ version: '3.2' services: reth: restart: always - image: "ghcr.io/paradigmxyz/reth:v0.2.0-beta.2" + image: "ghcr.io/paradigmxyz/reth:v1.0.6" ports: - 127.0.0.1:8545:8545 volumes: From 6009499aa49858cf84b2a9e446d948745ba53793 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Tue, 10 Sep 2024 16:45:54 +0300 Subject: [PATCH 047/116] refactor(api): Extract oneshot VM executor to executor crate (#2806) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Extracts oneshot VM executor to the executor crate. ## Why ❔ To make executor logic more reusable and maintainable. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
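To make the refactoring concrete, here is a rough usage sketch of the extracted API (illustrative only: `estimate_or_call`, the miss limit, and the choice of `TxExecutionArgs` constructor are hypothetical, not code from this PR):

```rust
use zksync_multivm::interface::{
    executor::OneshotExecutor, storage::ReadStorage, OneshotEnv, OneshotTracingParams,
    TxExecutionArgs,
};
use zksync_types::l2::L2Tx;
use zksync_vm_executor::oneshot::MainOneshotExecutor;

// Runs a single transaction against prepared storage + env and prints the outcome.
// `storage` and `env` are assumed to be produced by the caller (e.g., the sandbox).
async fn estimate_or_call<S>(storage: S, env: OneshotEnv, tx: L2Tx) -> anyhow::Result<()>
where
    S: ReadStorage + Send + 'static,
{
    // Anti-DoS limit on storage cache misses for calls / gas estimation.
    let executor = MainOneshotExecutor::new(1_000);
    // Constructor choice depends on the call type; `for_validation` is just the one shown here.
    let args = TxExecutionArgs::for_validation(tx);
    let result = executor
        .inspect_transaction_with_bytecode_compression(
            storage,
            env,
            args,
            OneshotTracingParams::default(), // assumes derived `Default`: no call tracing
        )
        .await?;
    println!("VM returned: {:?}", result.tx_result.result);
    Ok(())
}
```
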
--- Cargo.lock | 6 +- core/lib/multivm/src/tracers/mod.rs | 9 +- core/lib/multivm/src/tracers/validator/mod.rs | 12 +- .../multivm/src/tracers/validator/types.rs | 76 +---- .../src/tracers/validator/vm_1_4_1/mod.rs | 8 +- .../src/tracers/validator/vm_1_4_2/mod.rs | 8 +- .../validator/vm_boojum_integration/mod.rs | 8 +- .../src/tracers/validator/vm_latest/mod.rs | 8 +- .../validator/vm_refunds_enhancement/mod.rs | 8 +- .../validator/vm_virtual_blocks/mod.rs | 7 +- core/lib/vm_executor/Cargo.toml | 1 + core/lib/vm_executor/src/batch/factory.rs | 4 +- core/lib/vm_executor/src/batch/metrics.rs | 9 +- core/lib/vm_executor/src/lib.rs | 2 + core/lib/vm_executor/src/oneshot/metrics.rs | 143 +++++++++ .../vm_executor/src/oneshot/mock.rs} | 80 ++--- core/lib/vm_executor/src/oneshot/mod.rs | 291 ++++++++++++++++++ core/lib/vm_executor/src/shared.rs | 12 + core/lib/vm_interface/src/executor.rs | 34 +- core/lib/vm_interface/src/lib.rs | 11 +- core/lib/vm_interface/src/types/inputs/mod.rs | 72 +++++ .../src/types/outputs/execution_result.rs | 19 +- .../lib/vm_interface/src/types/outputs/mod.rs | 6 +- core/lib/vm_interface/src/types/tracer.rs | 79 +++++ core/node/api_server/Cargo.toml | 1 + .../api_server/src/execution_sandbox/apply.rs | 249 +-------------- .../src/execution_sandbox/execute.rs | 159 ++++------ .../api_server/src/execution_sandbox/mod.rs | 38 +-- .../api_server/src/execution_sandbox/tests.rs | 141 +++++++-- .../src/execution_sandbox/tracers.rs | 51 --- .../src/execution_sandbox/validate.rs | 27 +- .../src/execution_sandbox/vm_metrics.rs | 143 +-------- core/node/api_server/src/tx_sender/mod.rs | 15 +- core/node/api_server/src/tx_sender/tests.rs | 6 +- .../api_server/src/web3/namespaces/debug.rs | 32 +- core/node/api_server/src/web3/testonly.rs | 6 +- core/node/api_server/src/web3/tests/mod.rs | 6 +- core/node/api_server/src/web3/tests/vm.rs | 3 +- core/node/consensus/Cargo.toml | 4 - core/node/consensus/src/en.rs | 2 +- core/node/consensus/src/vm.rs | 10 +- 41 files changed, 981 insertions(+), 825 deletions(-) create mode 100644 core/lib/vm_executor/src/oneshot/metrics.rs rename core/{node/api_server/src/execution_sandbox/testonly.rs => lib/vm_executor/src/oneshot/mock.rs} (58%) create mode 100644 core/lib/vm_executor/src/oneshot/mod.rs create mode 100644 core/lib/vm_executor/src/shared.rs delete mode 100644 core/node/api_server/src/execution_sandbox/tracers.rs diff --git a/Cargo.lock b/Cargo.lock index d5abe5c3b151..ff1e44348b68 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9045,6 +9045,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm_executor", "zksync_web3_decl", ] @@ -9055,7 +9056,6 @@ dependencies = [ "anyhow", "async-trait", "hex", - "jsonrpsee", "rand 0.8.5", "secrecy", "semver", @@ -9064,7 +9064,6 @@ dependencies = [ "thiserror", "tokio", "tracing", - "zksync_basic_types", "zksync_concurrency", "zksync_config", "zksync_consensus_bft", @@ -9079,7 +9078,6 @@ dependencies = [ "zksync_l1_contract_interface", "zksync_merkle_tree", "zksync_metadata_calculator", - "zksync_multivm", "zksync_node_api_server", "zksync_node_genesis", "zksync_node_sync", @@ -9087,7 +9085,6 @@ dependencies = [ "zksync_protobuf", "zksync_state", "zksync_state_keeper", - "zksync_storage", "zksync_system_constants", "zksync_test_account", "zksync_types", @@ -9803,6 +9800,7 @@ dependencies = [ "zksync_dal", "zksync_multivm", "zksync_types", + "zksync_utils", ] [[package]] diff --git a/core/lib/multivm/src/tracers/mod.rs b/core/lib/multivm/src/tracers/mod.rs index 
69501cf39882..35224d993a17 100644
--- a/core/lib/multivm/src/tracers/mod.rs
+++ b/core/lib/multivm/src/tracers/mod.rs
@@ -1,11 +1,6 @@
 pub use self::{
- call_tracer::CallTracer,
- multivm_dispatcher::TracerDispatcher,
- prestate_tracer::PrestateTracer,
- storage_invocation::StorageInvocations,
- validator::{
- ValidationError, ValidationTracer, ValidationTracerParams, ViolatedValidationRule,
- },
+ call_tracer::CallTracer, multivm_dispatcher::TracerDispatcher, prestate_tracer::PrestateTracer,
+ storage_invocation::StorageInvocations, validator::ValidationTracer,
 };
 
 mod call_tracer;
diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs
index 307256792cf7..a1573f24c668 100644
--- a/core/lib/multivm/src/tracers/validator/mod.rs
+++ b/core/lib/multivm/src/tracers/validator/mod.rs
@@ -11,10 +11,12 @@ use zksync_types::{
 use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256};
 
 use self::types::{NewTrustedValidationItems, ValidationTracerMode};
-pub use self::types::{ValidationError, ValidationTracerParams, ViolatedValidationRule};
 use crate::{
 glue::tracers::IntoOldVmTracer,
- interface::storage::{StoragePtr, WriteStorage},
+ interface::{
+ storage::{StoragePtr, WriteStorage},
+ tracer::{ValidationParams, ViolatedValidationRule},
+ },
 };
 
 mod types;
@@ -50,7 +52,7 @@ type ValidationRoundResult = Result<NewTrustedValidationItems, ViolatedValidationRule>;
 impl<H: HistoryMode> ValidationTracer<H> {
 pub fn new(
- params: ValidationTracerParams,
+ params: ValidationParams,
 vm_version: VmVersion,
 ) -> (Self, Arc<OnceCell<ViolatedValidationRule>>) {
 let result = Arc::new(OnceCell::new());
@@ -179,8 +181,8 @@ impl<H: HistoryMode> ValidationTracer<H> {
 }
 }
 
- pub fn params(&self) -> ValidationTracerParams {
- ValidationTracerParams {
+ pub fn params(&self) -> ValidationParams {
+ ValidationParams {
 user_address: self.user_address,
 paymaster_address: self.paymaster_address,
 trusted_slots: self.trusted_slots.clone(),
diff --git a/core/lib/multivm/src/tracers/validator/types.rs b/core/lib/multivm/src/tracers/validator/types.rs
index 418d2b893503..b9d442279927 100644
--- a/core/lib/multivm/src/tracers/validator/types.rs
+++ b/core/lib/multivm/src/tracers/validator/types.rs
@@ -1,9 +1,4 @@
-use std::{collections::HashSet, fmt, fmt::Display};
-
-use zksync_types::{Address, H256, U256};
-use zksync_utils::u256_to_h256;
-
-use crate::interface::Halt;
+use zksync_types::{Address, H256};
 
 #[derive(Debug, Clone, Eq, PartialEq, Copy)]
 #[allow(clippy::enum_variant_names)]
@@ -21,72 +16,3 @@ pub(super) struct NewTrustedValidationItems {
 pub(super) new_allowed_slots: Vec<H256>,
 pub(super) new_trusted_addresses: Vec<Address>
, } - -#[derive(Debug, Clone)] -pub struct ValidationTracerParams { - pub user_address: Address, - pub paymaster_address: Address, - /// Slots that are trusted (i.e. the user can access them). - pub trusted_slots: HashSet<(Address, U256)>, - /// Trusted addresses (the user can access any slots on these addresses). - pub trusted_addresses: HashSet
, - /// Slots, that are trusted and the value of them is the new trusted address. - /// They are needed to work correctly with beacon proxy, where the address of the implementation is - /// stored in the beacon. - pub trusted_address_slots: HashSet<(Address, U256)>, - /// Number of computational gas that validation step is allowed to use. - pub computational_gas_limit: u32, -} - -#[derive(Debug, Clone)] -pub enum ViolatedValidationRule { - TouchedUnallowedStorageSlots(Address, U256), - CalledContractWithNoCode(Address), - TouchedUnallowedContext, - TookTooManyComputationalGas(u32), -} - -impl fmt::Display for ViolatedValidationRule { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ViolatedValidationRule::TouchedUnallowedStorageSlots(contract, key) => write!( - f, - "Touched unallowed storage slots: address {}, key: {}", - hex::encode(contract), - hex::encode(u256_to_h256(*key)) - ), - ViolatedValidationRule::CalledContractWithNoCode(contract) => { - write!(f, "Called contract with no code: {}", hex::encode(contract)) - } - ViolatedValidationRule::TouchedUnallowedContext => { - write!(f, "Touched unallowed context") - } - ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => { - write!( - f, - "Took too many computational gas, allowed limit: {}", - gas_limit - ) - } - } - } -} - -#[derive(Debug, Clone)] -pub enum ValidationError { - FailedTx(Halt), - ViolatedRule(ViolatedValidationRule), -} - -impl Display for ValidationError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - Self::FailedTx(revert_reason) => { - write!(f, "Validation revert: {}", revert_reason) - } - Self::ViolatedRule(rule) => { - write!(f, "Violated validation rules: {}", rule) - } - } - } -} diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs index 2beca41fb481..d1ddb2b44c80 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_1/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs index 3394a6c3f2b5..a51644ff9ea2 100644 --- a/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_1_4_2/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_1::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs index 53b5bf04d2e7..7f9767a5e632 100644 --- a/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_boojum_integration/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_4_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs index e963c79f4e41..c206bd6fb2ad 100644 --- a/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_latest/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_5_0::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -86,7 +86,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -100,7 +100,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs index 6107125d14d0..0badd7c58775 100644 --- a/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_refunds_enhancement/mod.rs @@ -9,13 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, + tracer::{TracerExecutionStatus, TracerExecutionStopReason, ViolatedValidationRule}, Halt, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -88,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. 
@@ -102,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs index bb166bedcdad..86a639915c9d 100644 --- a/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs +++ b/core/lib/multivm/src/tracers/validator/vm_virtual_blocks/mod.rs @@ -9,12 +9,13 @@ use zksync_utils::{h256_to_account_address, u256_to_account_address, u256_to_h25 use crate::{ interface::{ storage::{StoragePtr, WriteStorage}, + tracer::ViolatedValidationRule, VmExecutionResultAndLogs, }, tracers::{ dynamic::vm_1_3_3::DynTracer, validator::{ - types::{NewTrustedValidationItems, ValidationTracerMode, ViolatedValidationRule}, + types::{NewTrustedValidationItems, ValidationTracerMode}, ValidationRoundResult, ValidationTracer, }, }, @@ -87,7 +88,7 @@ impl ValidationTracer { Opcode::Context(context) => { match context { ContextOpcode::Meta => { - return Err(ViolatedValidationRule::TouchedUnallowedContext); + return Err(ViolatedValidationRule::TouchedDisallowedContext); } ContextOpcode::ErgsLeft => { // TODO (SMA-1168): implement the correct restrictions for the gas left opcode. @@ -101,7 +102,7 @@ impl ValidationTracer { let msg_sender = state.vm_local_state.callstack.current.msg_sender; if !self.is_allowed_storage_read(storage.clone(), this_address, key, msg_sender) { - return Err(ViolatedValidationRule::TouchedUnallowedStorageSlots( + return Err(ViolatedValidationRule::TouchedDisallowedStorageSlots( this_address, key, )); diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml index 9471e263bf43..089c2a9bcca7 100644 --- a/core/lib/vm_executor/Cargo.toml +++ b/core/lib/vm_executor/Cargo.toml @@ -15,6 +15,7 @@ zksync_contracts.workspace = true zksync_dal.workspace = true zksync_types.workspace = true zksync_multivm.workspace = true +zksync_utils.workspace = true async-trait.workspace = true once_cell.workspace = true diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index d02014584467..68a3769ee622 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -18,9 +18,9 @@ use zksync_types::{vm::FastVmMode, Transaction}; use super::{ executor::{Command, MainBatchExecutor}, - metrics::{TxExecutionStage, BATCH_TIP_METRICS, KEEPER_METRICS}, + metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, }; -use crate::batch::metrics::{InteractionType, EXECUTOR_METRICS}; +use crate::shared::InteractionType; /// The default implementation of [`BatchExecutorFactory`]. /// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). 
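Aside for reviewers: the `oneshot/metrics.rs` module added below follows the usual `vise` declaration pattern already used by `batch/metrics.rs`. A condensed, self-contained sketch of that pattern (all names here are illustrative, not part of the patch):

```rust
use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics};

// Label type: each variant selects one histogram within a `Family`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
#[metrics(label = "kind", rename_all = "snake_case")]
enum ExampleKind {
    Inner,
    History,
}

const SIZE_BUCKETS: Buckets = Buckets::exponential(1.0..=1_000_000.0, 10.0);

#[derive(Debug, Metrics)]
#[metrics(prefix = "example")]
struct ExampleMetrics {
    /// One histogram per `ExampleKind` value, sharing the same buckets.
    #[metrics(buckets = SIZE_BUCKETS)]
    size: Family<ExampleKind, Histogram<usize>>,
}

#[vise::register]
static METRICS: vise::Global<ExampleMetrics> = vise::Global::new();

fn observe_sizes(inner: usize, history: usize) {
    METRICS.size[&ExampleKind::Inner].observe(inner);
    METRICS.size[&ExampleKind::History].observe(history);
}
```
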
diff --git a/core/lib/vm_executor/src/batch/metrics.rs b/core/lib/vm_executor/src/batch/metrics.rs index 170ed4717989..6851193e9be9 100644 --- a/core/lib/vm_executor/src/batch/metrics.rs +++ b/core/lib/vm_executor/src/batch/metrics.rs @@ -5,6 +5,8 @@ use std::time::Duration; use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zksync_multivm::interface::VmExecutionResultAndLogs; +use crate::shared::InteractionType; + #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "command", rename_all = "snake_case")] pub(super) enum ExecutorCommand { @@ -26,13 +28,6 @@ pub(super) enum TxExecutionStage { TxRollback, } -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "interaction", rename_all = "snake_case")] -pub(super) enum InteractionType { - GetValue, - SetValue, -} - /// Executor-related metrics. #[derive(Debug, Metrics)] #[metrics(prefix = "state_keeper")] diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs index 24fb3d8f7eee..1a0fbb002df9 100644 --- a/core/lib/vm_executor/src/lib.rs +++ b/core/lib/vm_executor/src/lib.rs @@ -6,4 +6,6 @@ pub use zksync_multivm::interface::executor as interface; pub mod batch; +pub mod oneshot; +mod shared; pub mod storage; diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs new file mode 100644 index 000000000000..8a89ce0a9a4f --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -0,0 +1,143 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::{storage::StorageViewMetrics, VmMemoryMetrics}; + +use crate::shared::InteractionType; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "type", rename_all = "snake_case")] +enum SizeType { + Inner, + History, +} + +const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[ + 1_000.0, + 10_000.0, + 100_000.0, + 500_000.0, + 1_000_000.0, + 5_000_000.0, + 10_000_000.0, + 50_000_000.0, + 100_000_000.0, + 500_000_000.0, + 1_000_000_000.0, +]); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_memory")] +struct RuntimeContextMemoryMetrics { + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + event_sink_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + memory_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + decommitter_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + storage_size: Family>, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + storage_view_cache_size: Histogram, + #[metrics(buckets = MEMORY_SIZE_BUCKETS)] + full: Histogram, +} + +#[vise::register] +static MEMORY_METRICS: vise::Global = vise::Global::new(); + +const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_storage_interaction")] +struct RuntimeContextStorageMetrics { + #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] + amount: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration_per_unit: Family>, + #[metrics(buckets = Buckets::ZERO_TO_ONE)] + ratio: Histogram, +} + +#[vise::register] +static STORAGE_METRICS: vise::Global = vise::Global::new(); + +pub(super) fn report_vm_memory_metrics( + tx_id: &str, + memory_metrics: &VmMemoryMetrics, + vm_execution_took: Duration, + 
storage_metrics: StorageViewMetrics, +) { + MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); + MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); + MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner); + MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history); + MEMORY_METRICS.decommitter_size[&SizeType::Inner] + .observe(memory_metrics.decommittment_processor_inner); + MEMORY_METRICS.decommitter_size[&SizeType::History] + .observe(memory_metrics.decommittment_processor_history); + MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner); + MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history); + + MEMORY_METRICS + .storage_view_cache_size + .observe(storage_metrics.cache_size); + MEMORY_METRICS + .full + .observe(memory_metrics.full_size() + storage_metrics.cache_size); + + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; + + STORAGE_METRICS.amount[&InteractionType::Missed] + .observe(storage_metrics.storage_invocations_missed); + STORAGE_METRICS.amount[&InteractionType::GetValue] + .observe(storage_metrics.get_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::SetValue] + .observe(storage_metrics.set_value_storage_invocations); + STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); + + STORAGE_METRICS.duration[&InteractionType::Missed] + .observe(storage_metrics.time_spent_on_storage_missed); + STORAGE_METRICS.duration[&InteractionType::GetValue] + .observe(storage_metrics.time_spent_on_get_value); + STORAGE_METRICS.duration[&InteractionType::SetValue] + .observe(storage_metrics.time_spent_on_set_value); + STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); + + if total_storage_invocations > 0 { + STORAGE_METRICS.duration_per_unit[&InteractionType::Total] + .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); + } + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics + .time_spent_on_storage_missed + .div_f64(storage_metrics.storage_invocations_missed as f64); + STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); + } + + STORAGE_METRICS + .ratio + .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); + + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + tracing::info!( + "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, + ); + } +} diff --git a/core/node/api_server/src/execution_sandbox/testonly.rs b/core/lib/vm_executor/src/oneshot/mock.rs similarity 
index 58%
rename from core/node/api_server/src/execution_sandbox/testonly.rs
rename to core/lib/vm_executor/src/oneshot/mock.rs
index d9d60f52415a..8f3a12603c1a 100644
--- a/core/node/api_server/src/execution_sandbox/testonly.rs
+++ b/core/lib/vm_executor/src/oneshot/mock.rs
@@ -1,18 +1,18 @@
 use std::fmt;
 
 use async_trait::async_trait;
-#[cfg(test)]
-use zksync_multivm::interface::ExecutionResult;
 use zksync_multivm::interface::{
- storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode,
- VmExecutionResultAndLogs,
+ executor::{OneshotExecutor, TransactionValidator},
+ storage::ReadStorage,
+ tracer::{ValidationError, ValidationParams},
+ ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult,
+ TxExecutionArgs, TxExecutionMode, VmExecutionResultAndLogs,
 };
-use zksync_types::Transaction;
-
-use super::{execute::TransactionExecutor, OneshotExecutor, TxExecutionArgs};
+use zksync_types::{l2::L2Tx, Transaction};
 
 type TxResponseFn = dyn Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + Send + Sync;
 
+/// Mock [`OneshotExecutor`] implementation.
 pub struct MockOneshotExecutor {
 call_responses: Box<TxResponseFn>,
 tx_responses: Box<TxResponseFn>,
@@ -30,10 +30,7 @@ impl Default for MockOneshotExecutor {
 fn default() -> Self {
 Self {
 call_responses: Box::new(|tx, _| {
- panic!(
- "Unexpected call with data {}",
- hex::encode(tx.execute.calldata())
- );
+ panic!("Unexpected call with data {:?}", tx.execute.calldata());
 }),
 tx_responses: Box::new(|tx, _| {
 panic!("Unexpect transaction call: {tx:?}");
@@ -43,23 +40,23 @@ impl MockOneshotExecutor {
 }
 
 impl MockOneshotExecutor {
- #[cfg(test)]
- pub(crate) fn set_call_responses<F>(&mut self, responses: F)
+ /// Sets the call response closure used by this executor.
+ pub fn set_call_responses<F>(&mut self, responses: F)
 where
 F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync,
 {
 self.call_responses = self.wrap_responses(responses);
 }
 
- #[cfg(test)]
- pub(crate) fn set_tx_responses<F>(&mut self, responses: F)
+ /// Sets the transaction response closure used by this executor. The closure will be called both for transaction execution / validation
+ /// and for gas estimation.
+ pub fn set_tx_responses<F>(&mut self, responses: F)
 where
 F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync,
 {
 self.tx_responses = self.wrap_responses(responses);
 }
 
- #[cfg(test)]
 fn wrap_responses<F>(&mut self, responses: F) -> Box<TxResponseFn>
 where
 F: Fn(&Transaction, &OneshotEnv) -> ExecutionResult + 'static + Send + Sync,
@@ -76,8 +73,8 @@ impl MockOneshotExecutor {
 )
 }
 
- #[cfg(test)]
- pub(crate) fn set_tx_responses_with_logs<F>(&mut self, responses: F)
+ /// Same as [`Self::set_tx_responses()`], but allows customizing the returned VM logs etc. 
+ pub fn set_full_tx_responses<F>(&mut self, responses: F)
 where
 F: Fn(&Transaction, &OneshotEnv) -> VmExecutionResultAndLogs + 'static + Send + Sync,
 {
@@ -99,34 +96,41 @@ impl<S> OneshotExecutor<S> for MockOneshotExecutor
 where
 S: ReadStorage + Send + 'static,
 {
- type Tracers = ();
-
- async fn inspect_transaction(
+ async fn inspect_transaction_with_bytecode_compression(
 &self,
 _storage: S,
 env: OneshotEnv,
 args: TxExecutionArgs,
- (): Self::Tracers,
- ) -> anyhow::Result<VmExecutionResultAndLogs> {
- Ok(self.mock_inspect(env, args))
+ _params: OneshotTracingParams,
+ ) -> anyhow::Result<OneshotTransactionExecutionResult> {
+ Ok(OneshotTransactionExecutionResult {
+ tx_result: Box::new(self.mock_inspect(env, args)),
+ compression_result: Ok(()),
+ call_traces: vec![],
+ })
 }
+}
 
- async fn inspect_transaction_with_bytecode_compression(
+#[async_trait]
+impl<S> TransactionValidator<S> for MockOneshotExecutor
+where
+ S: ReadStorage + Send + 'static,
+{
+ async fn validate_transaction(
 &self,
 _storage: S,
 env: OneshotEnv,
- args: TxExecutionArgs,
- (): Self::Tracers,
- ) -> anyhow::Result<(
- Result<(), BytecodeCompressionError>,
- VmExecutionResultAndLogs,
- )> {
- Ok((Ok(()), self.mock_inspect(env, args)))
- }
-}
-
-impl From<MockOneshotExecutor> for TransactionExecutor {
- fn from(executor: MockOneshotExecutor) -> Self {
- Self::Mock(executor)
+ tx: L2Tx,
+ _validation_params: ValidationParams,
+ ) -> anyhow::Result<Result<(), ValidationError>> {
+ Ok(
+ match self
+ .mock_inspect(env, TxExecutionArgs::for_validation(tx))
+ .result
+ {
+ ExecutionResult::Halt { reason } => Err(ValidationError::FailedTx(reason)),
+ ExecutionResult::Success { .. } | ExecutionResult::Revert { .. } => Ok(()),
+ },
+ )
 }
}
diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs
new file mode 100644
index 000000000000..cac8edfdfdf8
--- /dev/null
+++ b/core/lib/vm_executor/src/oneshot/mod.rs
@@ -0,0 +1,291 @@
+//! Oneshot VM executor.
+
+use std::{
+ sync::Arc,
+ time::{Duration, Instant},
+};
+
+use anyhow::Context;
+use async_trait::async_trait;
+use once_cell::sync::OnceCell;
+use zksync_multivm::{
+ interface::{
+ executor::{OneshotExecutor, TransactionValidator},
+ storage::{ReadStorage, StoragePtr, StorageView, WriteStorage},
+ tracer::{ValidationError, ValidationParams},
+ ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult,
+ StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface,
+ },
+ tracers::{CallTracer, StorageInvocations, ValidationTracer},
+ utils::adjust_pubdata_price_for_tx,
+ vm_latest::HistoryDisabled,
+ zk_evm_latest::ethereum_types::U256,
+ MultiVMTracer, VmInstance,
+};
+use zksync_types::{
+ block::pack_block_info,
+ get_nonce_key,
+ l2::L2Tx,
+ utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance},
+ AccountTreeId, Nonce, StorageKey, Transaction, SYSTEM_CONTEXT_ADDRESS,
+ SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
+};
+use zksync_utils::{h256_to_u256, u256_to_h256};
+
+pub use self::mock::MockOneshotExecutor;
+
+mod metrics;
+mod mock;
+
+/// Main [`OneshotExecutor`] implementation used by the API server.
+#[derive(Debug, Default)]
+pub struct MainOneshotExecutor {
+ missed_storage_invocation_limit: usize,
+ execution_latency_histogram: Option<&'static vise::Histogram<Duration>>,
+}
+
+impl MainOneshotExecutor {
+ /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure).
+ /// The limit is applied for calls and gas estimations, but not during transaction validation. 
+ pub fn new(missed_storage_invocation_limit: usize) -> Self {
+ Self {
+ missed_storage_invocation_limit,
+ execution_latency_histogram: None,
+ }
+ }
+
+ /// Sets a histogram for measuring VM execution latency.
+ pub fn set_execution_latency_histogram(
+ &mut self,
+ histogram: &'static vise::Histogram<Duration>,
+ ) {
+ self.execution_latency_histogram = Some(histogram);
+ }
+}
+
+#[async_trait]
+impl<S> OneshotExecutor<S> for MainOneshotExecutor
+where
+ S: ReadStorage + Send + 'static,
+{
+ async fn inspect_transaction_with_bytecode_compression(
+ &self,
+ storage: S,
+ env: OneshotEnv,
+ args: TxExecutionArgs,
+ params: OneshotTracingParams,
+ ) -> anyhow::Result<OneshotTransactionExecutionResult> {
+ let missed_storage_invocation_limit = match env.system.execution_mode {
+ // storage accesses are not limited for tx validation
+ TxExecutionMode::VerifyExecute => usize::MAX,
+ TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => {
+ self.missed_storage_invocation_limit
+ }
+ };
+ let execution_latency_histogram = self.execution_latency_histogram;
+
+ tokio::task::spawn_blocking(move || {
+ let mut tracers = vec![];
+ let mut calls_result = Arc::<OnceCell<_>>::default();
+ if params.trace_calls {
+ tracers.push(CallTracer::new(calls_result.clone()).into_tracer_pointer());
+ }
+ tracers.push(
+ StorageInvocations::new(missed_storage_invocation_limit).into_tracer_pointer(),
+ );
+
+ let executor = VmSandbox::new(storage, env, args, execution_latency_histogram);
+ let mut result = executor.apply(|vm, transaction| {
+ let (compression_result, tx_result) = vm
+ .inspect_transaction_with_bytecode_compression(
+ tracers.into(),
+ transaction,
+ true,
+ );
+ OneshotTransactionExecutionResult {
+ tx_result: Box::new(tx_result),
+ compression_result: compression_result.map(drop),
+ call_traces: vec![],
+ }
+ });
+
+ result.call_traces = Arc::make_mut(&mut calls_result).take().unwrap_or_default();
+ result
+ })
+ .await
+ .context("VM execution panicked")
+ }
+}
+
+#[async_trait]
+impl<S> TransactionValidator<S> for MainOneshotExecutor
+where
+ S: ReadStorage + Send + 'static,
+{
+ async fn validate_transaction(
+ &self,
+ storage: S,
+ env: OneshotEnv,
+ tx: L2Tx,
+ validation_params: ValidationParams,
+ ) -> anyhow::Result<Result<(), ValidationError>> {
+ anyhow::ensure!(
+ env.system.execution_mode == TxExecutionMode::VerifyExecute,
+ "Unexpected execution mode for tx validation: {:?} (expected `VerifyExecute`)",
+ env.system.execution_mode
+ );
+ let execution_latency_histogram = self.execution_latency_histogram;
+
+ tokio::task::spawn_blocking(move || {
+ let (validation_tracer, mut validation_result) =
+ ValidationTracer::<HistoryDisabled>::new(
+ validation_params,
+ env.system.version.into(),
+ );
+ let tracers = vec![validation_tracer.into_tracer_pointer()];
+
+ let executor = VmSandbox::new(
+ storage,
+ env,
+ TxExecutionArgs::for_validation(tx),
+ execution_latency_histogram,
+ );
+ let exec_result = executor.apply(|vm, transaction| {
+ vm.push_transaction(transaction);
+ vm.inspect(tracers.into(), VmExecutionMode::OneTx)
+ });
+ let validation_result = Arc::make_mut(&mut validation_result)
+ .take()
+ .map_or(Ok(()), Err);
+
+ match (exec_result.result, validation_result) {
+ (_, Err(violated_rule)) => Err(ValidationError::ViolatedRule(violated_rule)),
+ (ExecutionResult::Halt { reason }, _) => Err(ValidationError::FailedTx(reason)),
+ _ => Ok(()),
+ }
+ })
+ .await
+ .context("VM execution panicked")
+ }
+}
+
+#[derive(Debug)]
+struct VmSandbox<S: ReadStorage> {
+ vm: Box<VmInstance<S, HistoryDisabled>>,
+ storage_view: StoragePtr<StorageView<S>>,
+ transaction: Transaction,
+ execution_latency_histogram: Option<&'static vise::Histogram<Duration>>,
+}
+
+impl<S: ReadStorage> 
+    /// This method is blocking.
+    fn new(
+        storage: S,
+        mut env: OneshotEnv,
+        execution_args: TxExecutionArgs,
+        execution_latency_histogram: Option<&'static vise::Histogram<Duration>>,
+    ) -> Self {
+        let mut storage_view = StorageView::new(storage);
+        Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block);
+
+        let protocol_version = env.system.version;
+        if execution_args.adjust_pubdata_price {
+            env.l1_batch.fee_input = adjust_pubdata_price_for_tx(
+                env.l1_batch.fee_input,
+                execution_args.transaction.gas_per_pubdata_byte_limit(),
+                env.l1_batch.enforced_base_fee.map(U256::from),
+                protocol_version.into(),
+            );
+        };
+
+        let storage_view = storage_view.to_rc_ptr();
+        let vm = Box::new(VmInstance::new_with_specific_version(
+            env.l1_batch,
+            env.system,
+            storage_view.clone(),
+            protocol_version.into_api_vm_version(),
+        ));
+
+        Self {
+            vm,
+            storage_view,
+            transaction: execution_args.transaction,
+            execution_latency_histogram,
+        }
+    }
+
+    /// This method is blocking.
+    fn setup_storage_view(
+        storage_view: &mut StorageView<S>,
+        execution_args: &TxExecutionArgs,
+        current_block: Option<StoredL2BlockEnv>,
+    ) {
+        let storage_view_setup_started_at = Instant::now();
+        if let Some(nonce) = execution_args.enforced_nonce {
+            let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account());
+            let full_nonce = storage_view.read_value(&nonce_key);
+            let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce));
+            let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce);
+            storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce));
+        }
+
+        let payer = execution_args.transaction.payer();
+        let balance_key = storage_key_for_eth_balance(&payer);
+        let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key));
+        current_balance += execution_args.added_balance;
+        storage_view.set_value(balance_key, u256_to_h256(current_balance));
+
+        // Reset L2 block info if necessary.
+        if let Some(current_block) = current_block {
+            let l2_block_info_key = StorageKey::new(
+                AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+                SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
+            );
+            let l2_block_info =
+                pack_block_info(current_block.number.into(), current_block.timestamp);
+            storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info));
+
+            let l2_block_txs_rolling_hash_key = StorageKey::new(
+                AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
+                SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
+            );
+            storage_view.set_value(
+                l2_block_txs_rolling_hash_key,
+                current_block.txs_rolling_hash,
+            );
+        }
+
+        let storage_view_setup_time = storage_view_setup_started_at.elapsed();
+        // We don't want to emit too many logs.
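+        // Setup is usually dominated by the nonce / balance reads above, so only
+        // unusually slow runs (past the 10 ms threshold below) are worth logging.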
+        if storage_view_setup_time > Duration::from_millis(10) {
+            tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",);
+        }
+    }
+
+    pub(super) fn apply<T, F>(mut self, apply_fn: F) -> T
+    where
+        F: FnOnce(&mut VmInstance<S, HistoryDisabled>, Transaction) -> T,
+    {
+        let tx_id = format!(
+            "{:?}-{}",
+            self.transaction.initiator_account(),
+            self.transaction.nonce().unwrap_or(Nonce(0))
+        );
+
+        let started_at = Instant::now();
+        let result = apply_fn(&mut *self.vm, self.transaction);
+        let vm_execution_took = started_at.elapsed();
+
+        if let Some(histogram) = self.execution_latency_histogram {
+            histogram.observe(vm_execution_took);
+        }
+        let memory_metrics = self.vm.record_vm_memory_metrics();
+        metrics::report_vm_memory_metrics(
+            &tx_id,
+            &memory_metrics,
+            vm_execution_took,
+            self.storage_view.as_ref().borrow_mut().metrics(),
+        );
+        result
+    }
+}
diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs
new file mode 100644
index 000000000000..420005be05d6
--- /dev/null
+++ b/core/lib/vm_executor/src/shared.rs
@@ -0,0 +1,12 @@
+//! Functionality shared among different types of executors.
+
+use vise::{EncodeLabelSet, EncodeLabelValue};
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
+#[metrics(label = "interaction", rename_all = "snake_case")]
+pub(crate) enum InteractionType {
+    Missed,
+    GetValue,
+    SetValue,
+    Total,
+}
diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs
index ee6665abfcb1..119f975fecd5 100644
--- a/core/lib/vm_interface/src/executor.rs
+++ b/core/lib/vm_interface/src/executor.rs
@@ -3,11 +3,13 @@ use std::fmt;
 
 use async_trait::async_trait;
-use zksync_types::Transaction;
+use zksync_types::{l2::L2Tx, Transaction};
 
 use crate::{
-    storage::StorageView, BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv,
-    SystemEnv,
+    storage::{ReadStorage, StorageView},
+    tracer::{ValidationError, ValidationParams},
+    BatchTransactionExecutionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, OneshotEnv,
+    OneshotTracingParams, OneshotTransactionExecutionResult, SystemEnv, TxExecutionArgs,
 };
 
 /// Factory of [`BatchExecutor`]s.
@@ -42,3 +44,29 @@ pub trait BatchExecutor<S>: 'static + Send + fmt::Debug {
     /// Finished the current L1 batch.
     async fn finish_batch(self: Box<Self>) -> anyhow::Result<(FinishedL1Batch, StorageView<S>)>;
 }
+
+/// VM executor capable of executing isolated transactions / calls (as opposed to [batch execution](BatchExecutor)).
+#[async_trait]
+pub trait OneshotExecutor<S: ReadStorage> {
+    /// Executes a transaction or call with optional tracers.
+    async fn inspect_transaction_with_bytecode_compression(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        args: TxExecutionArgs,
+        tracing: OneshotTracingParams,
+    ) -> anyhow::Result<OneshotTransactionExecutionResult>;
+}
+
+/// VM executor capable of validating transactions.
+#[async_trait]
+pub trait TransactionValidator<S: ReadStorage>: OneshotExecutor<S> {
+    /// Validates the provided transaction.
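+    ///
+    /// Returns `Ok(Ok(()))` if validation passes, `Ok(Err(_))` if the VM halts or an
+    /// account validation rule is violated, and `Err(_)` only for internal errors.
+    /// A minimal usage sketch (assuming a prepared `storage`, `env` and `params`):
+    ///
+    /// ```ignore
+    /// let validation = executor.validate_transaction(storage, env, tx, params).await?;
+    /// if let Err(err) = validation {
+    ///     tracing::warn!("transaction rejected: {err}");
+    /// }
+    /// ```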
+    async fn validate_transaction(
+        &self,
+        storage: S,
+        env: OneshotEnv,
+        tx: L2Tx,
+        validation_params: ValidationParams,
+    ) -> anyhow::Result<Result<(), ValidationError>>;
+}
diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs
index 315eb2bb36a7..2b30f82e0ce5 100644
--- a/core/lib/vm_interface/src/lib.rs
+++ b/core/lib/vm_interface/src/lib.rs
@@ -24,15 +24,16 @@ pub use crate::{
         VmRevertReason, VmRevertReasonParsingError,
     },
     inputs::{
-        L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, TxExecutionMode,
-        VmExecutionMode,
+        L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv,
+        TxExecutionArgs, TxExecutionMode, VmExecutionMode,
     },
     outputs::{
         BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic,
         CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics,
-        ExecutionResult, FinishedL1Batch, L2Block, Refunds, TransactionExecutionMetrics,
-        TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs,
-        VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics,
+        ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds,
+        TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent,
+        VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics,
+        VmMemoryMetrics,
     },
     tracer,
 },
diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs
index 4801c4d88b55..24f58ae72f16 100644
--- a/core/lib/vm_interface/src/types/inputs/mod.rs
+++ b/core/lib/vm_interface/src/types/inputs/mod.rs
@@ -1,3 +1,7 @@
+use zksync_types::{
+    l2::L2Tx, ExecuteTransactionCommon, Nonce, PackedEthSignature, Transaction, U256,
+};
+
 pub use self::{
     execution_mode::VmExecutionMode,
     l1_batch_env::L1BatchEnv,
@@ -21,3 +25,71 @@ pub struct OneshotEnv {
     /// in the system context contract, which are set from `L1BatchEnv.first_l2_block` by default.
     pub current_block: Option<StoredL2BlockEnv>,
 }
+
+/// Executor-independent arguments necessary for oneshot transaction execution.
+///
+/// # Developer guidelines
+///
+/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these
+/// are also provided to an executor.
+#[derive(Debug)]
+pub struct TxExecutionArgs {
+    /// Transaction / call itself.
+    pub transaction: Transaction,
+    /// Nonce override for the initiator account.
+    pub enforced_nonce: Option<Nonce>,
+    /// Balance added to the initiator account.
+    pub added_balance: U256,
+    /// If `true`, then the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <=
+    /// to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the
+    /// current L1 prices for gas or pubdata.
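+    /// (Enabled for validation and gas estimation; disabled for `eth_call`.)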
+    pub adjust_pubdata_price: bool,
+}
+
+impl TxExecutionArgs {
+    pub fn for_validation(tx: L2Tx) -> Self {
+        Self {
+            enforced_nonce: Some(tx.nonce()),
+            added_balance: U256::zero(),
+            adjust_pubdata_price: true,
+            transaction: tx.into(),
+        }
+    }
+
+    pub fn for_eth_call(mut call: L2Tx) -> Self {
+        if call.common_data.signature.is_empty() {
+            call.common_data.signature = PackedEthSignature::default().serialize_packed().into();
+        }
+
+        Self {
+            enforced_nonce: None,
+            added_balance: U256::zero(),
+            adjust_pubdata_price: false,
+            transaction: call.into(),
+        }
+    }
+
+    pub fn for_gas_estimate(transaction: Transaction) -> Self {
+        // For L2 transactions we need to explicitly put enough balance into the account of the users
+        // while for L1->L2 transactions the `to_mint` field plays this role
+        let added_balance = match &transaction.common_data {
+            ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas,
+            ExecuteTransactionCommon::L1(_) => U256::zero(),
+            ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(),
+        };
+
+        Self {
+            enforced_nonce: transaction.nonce(),
+            added_balance,
+            adjust_pubdata_price: true,
+            transaction,
+        }
+    }
+}
+
+/// Inputs and outputs for all tracers supported for oneshot transaction / call execution.
+#[derive(Debug, Default)]
+pub struct OneshotTracingParams {
+    /// Whether to trace contract calls.
+    pub trace_calls: bool,
+}
diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs
index d74d74652e28..6f9c02f0b587 100644
--- a/core/lib/vm_interface/src/types/outputs/execution_result.rs
+++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs
@@ -11,7 +11,8 @@ use zksync_types::{
 };
 
 use crate::{
-    CompressedBytecodeInfo, Halt, VmExecutionMetrics, VmExecutionStatistics, VmRevertReason,
+    BytecodeCompressionError, CompressedBytecodeInfo, Halt, VmExecutionMetrics,
+    VmExecutionStatistics, VmRevertReason,
 };
 
 const L1_MESSAGE_EVENT_SIGNATURE: H256 = H256([
@@ -297,11 +298,14 @@ impl Call {
     }
 }
 
-/// Mid-level transaction execution output returned by a batch executor.
+/// Mid-level transaction execution output returned by a [batch executor](crate::executor::BatchExecutor).
 #[derive(Debug, Clone)]
 pub struct BatchTransactionExecutionResult {
+    /// VM result.
     pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Compressed bytecodes used by the transaction.
     pub compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+    /// Call traces (if requested; otherwise, empty).
     pub call_traces: Vec<Call>,
 }
 
@@ -311,6 +315,17 @@ impl BatchTransactionExecutionResult {
     }
 }
 
+/// Mid-level transaction execution output returned by a [oneshot executor](crate::executor::OneshotExecutor).
+#[derive(Debug)]
+pub struct OneshotTransactionExecutionResult {
+    /// VM result.
+    pub tx_result: Box<VmExecutionResultAndLogs>,
+    /// Result of compressing bytecodes used by the transaction.
+    pub compression_result: Result<(), BytecodeCompressionError>,
+    /// Call traces (if requested; otherwise, empty).
+    pub call_traces: Vec<Call>,
+}
+
 /// High-level transaction execution result used by the API server sandbox etc.
 #[derive(Debug, Clone, PartialEq)]
 pub struct TransactionExecutionResult {
diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs
index abefa59bbe7e..1fa1cd5d1688 100644
--- a/core/lib/vm_interface/src/types/outputs/mod.rs
+++ b/core/lib/vm_interface/src/types/outputs/mod.rs
@@ -1,9 +1,9 @@
 pub use self::{
     bytecode::CompressedBytecodeInfo,
     execution_result::{
-        BatchTransactionExecutionResult, Call, CallType, ExecutionResult, Refunds,
-        TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs,
-        VmExecutionResultAndLogs,
+        BatchTransactionExecutionResult, Call, CallType, ExecutionResult,
+        OneshotTransactionExecutionResult, Refunds, TransactionExecutionResult, TxExecutionStatus,
+        VmEvent, VmExecutionLogs, VmExecutionResultAndLogs,
     },
     execution_state::{BootloaderMemory, CurrentExecutionState},
     finished_l1batch::FinishedL1Batch,
diff --git a/core/lib/vm_interface/src/types/tracer.rs b/core/lib/vm_interface/src/types/tracer.rs
index 1b42b2eabbb3..ba07772c7f23 100644
--- a/core/lib/vm_interface/src/types/tracer.rs
+++ b/core/lib/vm_interface/src/types/tracer.rs
@@ -1,3 +1,7 @@
+use std::{collections::HashSet, fmt};
+
+use zksync_types::{Address, U256};
+
 use crate::Halt;
 
 #[derive(Debug, Clone, PartialEq)]
@@ -37,3 +41,78 @@ pub enum VmExecutionStopReason {
     VmFinished,
     TracerRequestedStop(TracerExecutionStopReason),
 }
+
+/// Transaction validation parameters.
+#[derive(Debug, Clone)]
+pub struct ValidationParams {
+    pub user_address: Address,
+    pub paymaster_address: Address,
+    /// Slots that are trusted (i.e. the user can access them).
+    pub trusted_slots: HashSet<(Address, U256)>,
+    /// Trusted addresses (the user can access any slots on these addresses).
+    pub trusted_addresses: HashSet<Address>,
+    /// Slots that are trusted; their values are treated as new trusted addresses.
+    /// They are needed to work correctly with beacon proxies, where the address of the implementation is
+    /// stored in the beacon.
+    pub trusted_address_slots: HashSet<(Address, U256)>,
+    /// Number of computational gas that the validation step is allowed to use.
+    pub computational_gas_limit: u32,
+}
+
+/// Rules that can be violated when validating a transaction.
+#[derive(Debug, Clone)]
+pub enum ViolatedValidationRule {
+    /// The transaction touched disallowed storage slots during validation.
+    TouchedDisallowedStorageSlots(Address, U256),
+    /// The transaction called a contract without attached bytecode.
+    CalledContractWithNoCode(Address),
+    /// The transaction touched disallowed context.
+    TouchedDisallowedContext,
+    /// The transaction used too much gas during validation.
+    TookTooManyComputationalGas(u32),
+}
+
+impl fmt::Display for ViolatedValidationRule {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ViolatedValidationRule::TouchedDisallowedStorageSlots(contract, key) => write!(
+                f,
+                "Touched disallowed storage slots: address {contract:x}, key: {key:x}",
+            ),
+            ViolatedValidationRule::CalledContractWithNoCode(contract) => {
+                write!(f, "Called contract with no code: {contract:x}")
+            }
+            ViolatedValidationRule::TouchedDisallowedContext => {
+                write!(f, "Touched disallowed context")
+            }
+            ViolatedValidationRule::TookTooManyComputationalGas(gas_limit) => {
+                write!(
+                    f,
+                    "Took too many computational gas, allowed limit: {gas_limit}"
+                )
+            }
+        }
+    }
+}
+
+/// Errors returned when validating a transaction.
+#[derive(Debug)]
+pub enum ValidationError {
+    /// VM execution was halted during validation.
+    FailedTx(Halt),
+    /// Transaction violated one of account validation rules.
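+    /// The wrapped value identifies the concrete [`ViolatedValidationRule`].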
+ ViolatedRule(ViolatedValidationRule), +} + +impl fmt::Display for ValidationError { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Self::FailedTx(revert_reason) => { + write!(f, "Validation revert: {}", revert_reason) + } + Self::ViolatedRule(rule) => { + write!(f, "Violated validation rules: {}", rule) + } + } + } +} diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index f7d40210b485..040e2a94a110 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -29,6 +29,7 @@ zksync_utils.workspace = true zksync_protobuf.workspace = true zksync_mini_merkle_tree.workspace = true zksync_multivm.workspace = true +zksync_vm_executor.workspace = true vise.workspace = true anyhow.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/node/api_server/src/execution_sandbox/apply.rs index 8b5cf69822bf..0fbf8abc3dd4 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/node/api_server/src/execution_sandbox/apply.rs @@ -9,19 +9,12 @@ use std::time::{Duration, Instant}; use anyhow::Context as _; -use async_trait::async_trait; use tokio::runtime::Handle; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ - interface::{ - storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, - BytecodeCompressionError, L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, - TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, - }, - tracers::StorageInvocations, - utils::{adjust_pubdata_price_for_tx, get_eth_call_gas_limit}, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryDisabled}, - MultiVMTracer, MultiVmTracerPointer, VmInstance, + interface::{L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv}, + utils::get_eth_call_gas_limit, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_state::PostgresStorage; use zksync_system_constants::{ @@ -30,18 +23,15 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, unpack_block_info, L2BlockHasher}, + block::{unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, - get_nonce_key, - utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, - AccountTreeId, L1BatchNumber, L2BlockNumber, Nonce, ProtocolVersionId, StorageKey, Transaction, - H256, U256, + AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, U256, }; -use zksync_utils::{h256_to_u256, time::seconds_since_epoch, u256_to_h256}; +use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; use super::{ - vm_metrics::{self, SandboxStage, SANDBOX_METRICS}, - ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, + vm_metrics::{SandboxStage, SANDBOX_METRICS}, + BlockArgs, TxSetupArgs, }; pub(super) async fn prepare_env_and_storage( @@ -207,218 +197,6 @@ fn prepare_env( (system_env, l1_batch_env) } -// public for testing purposes -#[derive(Debug)] -pub(super) struct VmSandbox { - vm: Box>, - storage_view: StoragePtr>, - transaction: Transaction, -} - -impl VmSandbox { - /// This method is blocking. 
- pub fn new(storage: S, mut env: OneshotEnv, execution_args: TxExecutionArgs) -> Self { - let mut storage_view = StorageView::new(storage); - Self::setup_storage_view(&mut storage_view, &execution_args, env.current_block); - - let protocol_version = env.system.version; - if execution_args.adjust_pubdata_price { - env.l1_batch.fee_input = adjust_pubdata_price_for_tx( - env.l1_batch.fee_input, - execution_args.transaction.gas_per_pubdata_byte_limit(), - env.l1_batch.enforced_base_fee.map(U256::from), - protocol_version.into(), - ); - }; - - let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( - env.l1_batch, - env.system, - storage_view.clone(), - protocol_version.into_api_vm_version(), - )); - - Self { - vm, - storage_view, - transaction: execution_args.transaction, - } - } - - /// This method is blocking. - fn setup_storage_view( - storage_view: &mut StorageView, - execution_args: &TxExecutionArgs, - current_block: Option, - ) { - let storage_view_setup_started_at = Instant::now(); - if let Some(nonce) = execution_args.enforced_nonce { - let nonce_key = get_nonce_key(&execution_args.transaction.initiator_account()); - let full_nonce = storage_view.read_value(&nonce_key); - let (_, deployment_nonce) = decompose_full_nonce(h256_to_u256(full_nonce)); - let enforced_full_nonce = nonces_to_full_nonce(U256::from(nonce.0), deployment_nonce); - storage_view.set_value(nonce_key, u256_to_h256(enforced_full_nonce)); - } - - let payer = execution_args.transaction.payer(); - let balance_key = storage_key_for_eth_balance(&payer); - let mut current_balance = h256_to_u256(storage_view.read_value(&balance_key)); - current_balance += execution_args.added_balance; - storage_view.set_value(balance_key, u256_to_h256(current_balance)); - - // Reset L2 block info if necessary. - if let Some(current_block) = current_block { - let l2_block_info_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let l2_block_info = - pack_block_info(current_block.number.into(), current_block.timestamp); - storage_view.set_value(l2_block_info_key, u256_to_h256(l2_block_info)); - - let l2_block_txs_rolling_hash_key = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - storage_view.set_value( - l2_block_txs_rolling_hash_key, - current_block.txs_rolling_hash, - ); - } - - let storage_view_setup_time = storage_view_setup_started_at.elapsed(); - // We don't want to emit too many logs. 
- if storage_view_setup_time > Duration::from_millis(10) { - tracing::debug!("Prepared the storage view (took {storage_view_setup_time:?})",); - } - } - - fn wrap_tracers( - tracers: Vec, - env: &OneshotEnv, - missed_storage_invocation_limit: usize, - ) -> Vec, HistoryDisabled>> { - let storage_invocation_tracer = StorageInvocations::new(missed_storage_invocation_limit); - let protocol_version = env.system.version; - tracers - .into_iter() - .map(|tracer| tracer.into_boxed(protocol_version)) - .chain([storage_invocation_tracer.into_tracer_pointer()]) - .collect() - } - - pub(super) fn apply(mut self, apply_fn: F) -> T - where - F: FnOnce(&mut VmInstance, Transaction) -> T, - { - let tx_id = format!( - "{:?}-{}", - self.transaction.initiator_account(), - self.transaction.nonce().unwrap_or(Nonce(0)) - ); - - let execution_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Execution].start(); - let result = apply_fn(&mut *self.vm, self.transaction); - let vm_execution_took = execution_latency.observe(); - - let memory_metrics = self.vm.record_vm_memory_metrics(); - vm_metrics::report_vm_memory_metrics( - &tx_id, - &memory_metrics, - vm_execution_took, - self.storage_view.as_ref().borrow_mut().metrics(), - ); - result - } -} - -/// Main [`OneshotExecutor`] implementation used by the API server. -#[derive(Debug, Default)] -pub struct MainOneshotExecutor { - missed_storage_invocation_limit: usize, -} - -impl MainOneshotExecutor { - /// Creates a new executor with the specified limit of cache misses for storage read operations (an anti-DoS measure). - /// The limit is applied for calls and gas estimations, but not during transaction validation. - pub fn new(missed_storage_invocation_limit: usize) -> Self { - Self { - missed_storage_invocation_limit, - } - } -} - -#[async_trait] -impl OneshotExecutor for MainOneshotExecutor -where - S: ReadStorage + Send + 'static, -{ - type Tracers = Vec; - - async fn inspect_transaction( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result { - let missed_storage_invocation_limit = match env.system.execution_mode { - // storage accesses are not limited for tx validation - TxExecutionMode::VerifyExecute => usize::MAX, - TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { - self.missed_storage_invocation_limit - } - }; - - tokio::task::spawn_blocking(move || { - let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); - let executor = VmSandbox::new(storage, env, args); - executor.apply(|vm, transaction| { - vm.push_transaction(transaction); - vm.inspect(tracers.into(), VmExecutionMode::OneTx) - }) - }) - .await - .context("VM execution panicked") - } - - async fn inspect_transaction_with_bytecode_compression( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )> { - let missed_storage_invocation_limit = match env.system.execution_mode { - // storage accesses are not limited for tx validation - TxExecutionMode::VerifyExecute => usize::MAX, - TxExecutionMode::EthCall | TxExecutionMode::EstimateFee => { - self.missed_storage_invocation_limit - } - }; - - tokio::task::spawn_blocking(move || { - let tracers = VmSandbox::wrap_tracers(tracers, &env, missed_storage_invocation_limit); - let executor = VmSandbox::new(storage, env, args); - executor.apply(|vm, transaction| { - let (bytecodes_result, exec_result) = vm - 
.inspect_transaction_with_bytecode_compression( - tracers.into(), - transaction, - true, - ); - (bytecodes_result.map(drop), exec_result) - }) - }) - .await - .context("VM execution panicked") - } -} - async fn read_stored_l2_block( connection: &mut Connection<'_, Core>, l2_block_number: L2BlockNumber, @@ -467,15 +245,6 @@ impl BlockArgs { ) } - fn is_estimate_like(&self) -> bool { - matches!( - self.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - | api::BlockId::Number(api::BlockNumber::Latest) - | api::BlockId::Number(api::BlockNumber::Committed) - ) - } - pub(crate) async fn default_eth_call_gas( &self, connection: &mut Connection<'_, Core>, @@ -529,7 +298,7 @@ impl BlockArgs { .context("resolved L2 block disappeared from storage")? }; - let historical_fee_input = if !self.is_estimate_like() { + let historical_fee_input = if !self.resolves_to_latest_sealed_l2_block() { let l2_block_header = connection .blocks_dal() .get_l2_block_header(self.resolved_block_number) diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index f247313db2b1..d22d7de47d0f 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -3,86 +3,27 @@ use async_trait::async_trait; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ - storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TransactionExecutionMetrics, - VmExecutionResultAndLogs, -}; -use zksync_types::{ - api::state_override::StateOverride, l2::L2Tx, ExecuteTransactionCommon, Nonce, - PackedEthSignature, Transaction, U256, + executor::{OneshotExecutor, TransactionValidator}, + storage::ReadStorage, + tracer::{ValidationError, ValidationParams}, + Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, + TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, }; +use zksync_types::{api::state_override::StateOverride, l2::L2Tx}; +use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - apply::{self, MainOneshotExecutor}, - storage::StorageWithOverrides, - testonly::MockOneshotExecutor, - vm_metrics, ApiTracer, BlockArgs, OneshotExecutor, TxSetupArgs, VmPermit, + apply, storage::StorageWithOverrides, vm_metrics, BlockArgs, TxSetupArgs, VmPermit, + SANDBOX_METRICS, }; - -/// Executor-independent arguments necessary to for oneshot transaction execution. -/// -/// # Developer guidelines -/// -/// Please don't add fields that duplicate `SystemEnv` or `L1BatchEnv` information, since both of these -/// are also provided to an executor. -#[derive(Debug)] -pub struct TxExecutionArgs { - /// Transaction / call itself. - pub transaction: Transaction, - /// Nonce override for the initiator account. - pub enforced_nonce: Option, - /// Balance added to the initiator account. - pub added_balance: U256, - /// If `true`, then the batch's L1 / pubdata gas price will be adjusted so that the transaction's gas per pubdata limit is <= - /// to the one in the block. This is often helpful in case we want the transaction validation to work regardless of the - /// current L1 prices for gas or pubdata. 
- pub adjust_pubdata_price: bool, -} - -impl TxExecutionArgs { - pub fn for_validation(tx: L2Tx) -> Self { - Self { - enforced_nonce: Some(tx.nonce()), - added_balance: U256::zero(), - adjust_pubdata_price: true, - transaction: tx.into(), - } - } - - pub fn for_eth_call(mut call: L2Tx) -> Self { - if call.common_data.signature.is_empty() { - call.common_data.signature = PackedEthSignature::default().serialize_packed().into(); - } - - Self { - enforced_nonce: None, - added_balance: U256::zero(), - adjust_pubdata_price: false, - transaction: call.into(), - } - } - - pub fn for_gas_estimate(transaction: Transaction) -> Self { - // For L2 transactions we need to explicitly put enough balance into the account of the users - // while for L1->L2 transactions the `to_mint` field plays this role - let added_balance = match &transaction.common_data { - ExecuteTransactionCommon::L2(data) => data.fee.gas_limit * data.fee.max_fee_per_gas, - ExecuteTransactionCommon::L1(_) => U256::zero(), - ExecuteTransactionCommon::ProtocolUpgrade(_) => U256::zero(), - }; - - Self { - enforced_nonce: transaction.nonce(), - added_balance, - adjust_pubdata_price: true, - transaction, - } - } -} +use crate::execution_sandbox::vm_metrics::SandboxStage; #[derive(Debug, Clone)] pub struct TransactionExecutionOutput { /// Output of the VM. pub vm: VmExecutionResultAndLogs, + /// Traced calls if requested. + pub call_traces: Vec, /// Execution metrics. pub metrics: TransactionExecutionMetrics, /// Were published bytecodes OK? @@ -99,7 +40,10 @@ pub enum TransactionExecutor { impl TransactionExecutor { pub fn real(missed_storage_invocation_limit: usize) -> Self { - Self::Real(MainOneshotExecutor::new(missed_storage_invocation_limit)) + let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); + executor + .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); + Self::Real(executor) } /// This method assumes that (block with number `resolved_block_number` is present in DB) @@ -114,7 +58,7 @@ impl TransactionExecutor { connection: Connection<'static, Core>, block_args: BlockArgs, state_override: Option, - tracers: Vec, + tracing_params: OneshotTracingParams, ) -> anyhow::Result { let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16; let (env, storage) = @@ -122,64 +66,91 @@ impl TransactionExecutor { let state_override = state_override.unwrap_or_default(); let storage = StorageWithOverrides::new(storage, &state_override); - let (published_bytecodes, execution_result) = self - .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracers) + let result = self + .inspect_transaction_with_bytecode_compression( + storage, + env, + execution_args, + tracing_params, + ) .await?; drop(vm_permit); let metrics = - vm_metrics::collect_tx_execution_metrics(total_factory_deps, &execution_result); + vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result); Ok(TransactionExecutionOutput { - vm: execution_result, + vm: *result.tx_result, + call_traces: result.call_traces, metrics, - are_published_bytecodes_ok: published_bytecodes.is_ok(), + are_published_bytecodes_ok: result.compression_result.is_ok(), }) } } +impl From for TransactionExecutor { + fn from(executor: MockOneshotExecutor) -> Self { + Self::Mock(executor) + } +} + #[async_trait] impl OneshotExecutor for TransactionExecutor where S: ReadStorage + Send + 'static, { - type Tracers = Vec; - - async fn inspect_transaction( + async fn 
inspect_transaction_with_bytecode_compression( &self, storage: S, env: OneshotEnv, args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result { + tracing_params: OneshotTracingParams, + ) -> anyhow::Result { match self { Self::Real(executor) => { executor - .inspect_transaction(storage, env, args, tracers) + .inspect_transaction_with_bytecode_compression( + storage, + env, + args, + tracing_params, + ) + .await + } + Self::Mock(executor) => { + executor + .inspect_transaction_with_bytecode_compression( + storage, + env, + args, + tracing_params, + ) .await } - Self::Mock(executor) => executor.inspect_transaction(storage, env, args, ()).await, } } +} - async fn inspect_transaction_with_bytecode_compression( +#[async_trait] +impl TransactionValidator for TransactionExecutor +where + S: ReadStorage + Send + 'static, +{ + async fn validate_transaction( &self, storage: S, env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )> { + tx: L2Tx, + validation_params: ValidationParams, + ) -> anyhow::Result> { match self { Self::Real(executor) => { executor - .inspect_transaction_with_bytecode_compression(storage, env, args, tracers) + .validate_transaction(storage, env, tx, validation_params) .await } Self::Mock(executor) => { executor - .inspect_transaction_with_bytecode_compression(storage, env, args, ()) + .validate_transaction(storage, env, tx, validation_params) .await } } diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index faaccf03c96a..79c6123642cc 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -4,23 +4,18 @@ use std::{ }; use anyhow::Context as _; -use async_trait::async_trait; use rand::{thread_rng, Rng}; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; -use zksync_multivm::interface::{ - storage::ReadStorage, BytecodeCompressionError, OneshotEnv, TxExecutionMode, - VmExecutionResultAndLogs, -}; +use zksync_multivm::interface::TxExecutionMode; use zksync_state::PostgresStorageCaches; use zksync_types::{ api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, }; -pub use self::execute::{TransactionExecutor, TxExecutionArgs}; +pub use self::execute::TransactionExecutor; // FIXME (PLA-1018): remove use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, - tracers::ApiTracer, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; @@ -31,10 +26,8 @@ mod apply; mod error; mod execute; mod storage; -pub mod testonly; #[cfg(test)] mod tests; -mod tracers; mod validate; mod vm_metrics; @@ -184,7 +177,7 @@ impl TxSetupArgs { caches: PostgresStorageCaches::new(1, 1), validation_computational_gas_limit: u32::MAX, chain_id: L2ChainId::default(), - whitelisted_tokens_for_aa: Vec::new(), + whitelisted_tokens_for_aa: vec![], enforced_base_fee: None, } } @@ -417,28 +410,3 @@ impl BlockArgs { ) } } - -/// VM executor capable of executing isolated transactions / calls (as opposed to batch execution). 
-#[async_trait] -trait OneshotExecutor { - type Tracers: Default; - - async fn inspect_transaction( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result; - - async fn inspect_transaction_with_bytecode_compression( - &self, - storage: S, - env: OneshotEnv, - args: TxExecutionArgs, - tracers: Self::Tracers, - ) -> anyhow::Result<( - Result<(), BytecodeCompressionError>, - VmExecutionResultAndLogs, - )>; -} diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index da593292e2e1..35103779a49e 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -1,16 +1,31 @@ //! Tests for the VM execution sandbox. +use std::collections::HashMap; + use assert_matches::assert_matches; +use test_casing::test_casing; use zksync_dal::ConnectionPool; +use zksync_multivm::{ + interface::{ + executor::{OneshotExecutor, TransactionValidator}, + tracer::ValidationError, + Halt, OneshotTracingParams, TxExecutionArgs, + }, + utils::derive_base_fee_and_gas_per_pubdata, +}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_types::{api::state_override::StateOverride, Transaction}; - -use super::*; -use crate::{ - execution_sandbox::{apply::VmSandbox, storage::StorageWithOverrides}, - tx_sender::ApiContracts, +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{ + api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, + l2::L2Tx, + transaction_request::PaymasterParams, + K256PrivateKey, Nonce, ProtocolVersionId, Transaction, U256, }; +use zksync_vm_executor::oneshot::MainOneshotExecutor; + +use super::{storage::StorageWithOverrides, *}; +use crate::tx_sender::ApiContracts; #[tokio::test] async fn creating_block_args() { @@ -167,7 +182,7 @@ async fn creating_block_args_after_snapshot_recovery() { } #[tokio::test] -async fn instantiating_vm() { +async fn estimating_gas() { let pool = ConnectionPool::::test_pool().await; let mut connection = pool.connection().await.unwrap(); insert_genesis_batch(&mut connection, &GenesisParams::mock()) @@ -188,24 +203,104 @@ async fn instantiating_vm() { } async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) { - let transaction = Transaction::from(create_l2_transaction(10, 100)); let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas; + let mut setup_args = TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + setup_args.fee_input, + ProtocolVersionId::latest().into(), + ); + setup_args.enforced_base_fee = Some(base_fee); + let transaction = Transaction::from(create_transfer(base_fee, gas_per_pubdata)); let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone()); - let (env, storage) = apply::prepare_env_and_storage( - connection, - TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts), - &block_args, - ) - .await - .unwrap(); + let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) + .await + .unwrap(); let storage = StorageWithOverrides::new(storage, &StateOverride::default()); - tokio::task::spawn_blocking(move || { - VmSandbox::new(storage, env, execution_args).apply(|_, received_tx| { - 
assert_eq!(received_tx, transaction); - }); - }) - .await - .expect("VM execution panicked") + let tracing_params = OneshotTracingParams::default(); + let output = MainOneshotExecutor::new(usize::MAX) + .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracing_params) + .await + .unwrap(); + output.compression_result.unwrap(); + let tx_result = *output.tx_result; + assert!(!tx_result.result.is_failed(), "{tx_result:#?}"); +} + +fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + let fee = Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }; + L2Tx::new_signed( + Address::random(), + vec![], + Nonce(0), + fee, + U256::zero(), + L2ChainId::default(), + &K256PrivateKey::random(), + vec![], + PaymasterParams::default(), + ) + .unwrap() +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn validating_transaction(set_balance: bool) { + let pool = ConnectionPool::::test_pool().await; + let mut connection = pool.connection().await.unwrap(); + insert_genesis_batch(&mut connection, &GenesisParams::mock()) + .await + .unwrap(); + + let block_args = BlockArgs::pending(&mut connection).await.unwrap(); + + let call_contracts = ApiContracts::load_from_disk().await.unwrap().eth_call; + let mut setup_args = TxSetupArgs::mock(TxExecutionMode::VerifyExecute, call_contracts); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + setup_args.fee_input, + ProtocolVersionId::latest().into(), + ); + setup_args.enforced_base_fee = Some(base_fee); + let transaction = create_transfer(base_fee, gas_per_pubdata); + + let validation_params = + validate::get_validation_params(&mut connection, &transaction, u32::MAX, &[]) + .await + .unwrap(); + let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) + .await + .unwrap(); + let state_override = if set_balance { + let account_override = OverrideAccount { + balance: Some(U256::from(1) << 128), + ..OverrideAccount::default() + }; + StateOverride::new(HashMap::from([( + transaction.initiator_account(), + account_override, + )])) + } else { + StateOverride::default() + }; + let storage = StorageWithOverrides::new(storage, &state_override); + + let validation_result = MainOneshotExecutor::new(usize::MAX) + .validate_transaction(storage, env, transaction, validation_params) + .await + .unwrap(); + if set_balance { + validation_result.expect("validation failed"); + } else { + assert_matches!( + validation_result.unwrap_err(), + ValidationError::FailedTx(Halt::ValidationFailed(reason)) + if reason.to_string().contains("Not enough balance") + ); + } } diff --git a/core/node/api_server/src/execution_sandbox/tracers.rs b/core/node/api_server/src/execution_sandbox/tracers.rs deleted file mode 100644 index 6fdc3dbc7b62..000000000000 --- a/core/node/api_server/src/execution_sandbox/tracers.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_multivm::{ - interface::{storage::WriteStorage, Call}, - tracers::{CallTracer, ValidationTracer, ValidationTracerParams, ViolatedValidationRule}, - vm_latest::HistoryDisabled, - MultiVMTracer, MultiVmTracerPointer, -}; -use zksync_types::ProtocolVersionId; - -/// Custom tracers supported by the API sandbox. 
-#[derive(Debug)] -pub enum ApiTracer { - CallTracer(Arc>>), - Validation { - params: ValidationTracerParams, - result: Arc>, - }, -} - -impl ApiTracer { - pub fn validation( - params: ValidationTracerParams, - ) -> (Self, Arc>) { - let result = Arc::>::default(); - let this = Self::Validation { - params, - result: result.clone(), - }; - (this, result) - } - - pub(super) fn into_boxed( - self, - protocol_version: ProtocolVersionId, - ) -> MultiVmTracerPointer - where - S: WriteStorage, - { - match self { - Self::CallTracer(traces) => CallTracer::new(traces).into_tracer_pointer(), - Self::Validation { params, result } => { - let (mut tracer, _) = - ValidationTracer::::new(params, protocol_version.into()); - tracer.result = result; - tracer.into_tracer_pointer() - } - } - } -} diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index a95cf6c3a91e..e9087e608eeb 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -3,9 +3,9 @@ use std::collections::HashSet; use anyhow::Context as _; use tracing::Instrument; use zksync_dal::{Connection, Core, CoreDal}; -use zksync_multivm::{ - interface::ExecutionResult, - tracers::{ValidationError as RawValidationError, ValidationTracerParams}, +use zksync_multivm::interface::{ + executor::TransactionValidator, + tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, @@ -17,7 +17,7 @@ use super::{ execute::TransactionExecutor, storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, - ApiTracer, BlockArgs, OneshotExecutor, TxExecutionArgs, TxSetupArgs, VmPermit, + BlockArgs, TxSetupArgs, VmPermit, }; /// Validation error used by the sandbox. Besides validation errors returned by VM, it also includes an internal error @@ -42,7 +42,7 @@ impl TransactionExecutor { computational_gas_limit: u32, ) -> Result<(), ValidationError> { let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); - let params = get_validation_params( + let validation_params = get_validation_params( &mut connection, &tx, computational_gas_limit, @@ -55,21 +55,14 @@ impl TransactionExecutor { apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; let storage = StorageWithOverrides::new(storage, &StateOverride::default()); - let execution_args = TxExecutionArgs::for_validation(tx); - let (tracer, validation_result) = ApiTracer::validation(params); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); - let result = self - .inspect_transaction(storage, env, execution_args, vec![tracer]) + let validation_result = self + .validate_transaction(storage, env, tx, validation_params) .instrument(tracing::debug_span!("validation")) .await?; drop(vm_permit); stage_latency.observe(); - let validation_result = match (result.result, validation_result.get()) { - (_, Some(rule)) => Err(RawValidationError::ViolatedRule(rule.clone())), - (ExecutionResult::Halt { reason }, _) => Err(RawValidationError::FailedTx(reason)), - (_, None) => Ok(()), - }; total_latency.observe(); validation_result.map_err(ValidationError::Vm) } @@ -78,12 +71,12 @@ impl TransactionExecutor { /// Some slots can be marked as "trusted". 
That is needed for slots which can not be /// trusted to change between validation and execution in general case, but /// sometimes we can safely rely on them to not change often. -async fn get_validation_params( +pub(super) async fn get_validation_params( connection: &mut Connection<'_, Core>, tx: &L2Tx, computational_gas_limit: u32, whitelisted_tokens_for_aa: &[Address], -) -> anyhow::Result { +) -> anyhow::Result { let method_latency = EXECUTION_METRICS.get_validation_params.start(); let user_address = tx.common_data.initiator_address; let paymaster_address = tx.common_data.paymaster_params.paymaster; @@ -122,7 +115,7 @@ async fn get_validation_params( span.exit(); method_latency.observe(); - Ok(ValidationTracerParams { + Ok(ValidationParams { user_address, paymaster_address, trusted_slots, diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index ffe87be899ba..cbfe7e90bd0f 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -4,77 +4,14 @@ use vise::{ Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, LatencyObserver, Metrics, }; use zksync_multivm::{ - interface::{ - storage::StorageViewMetrics, TransactionExecutionMetrics, VmEvent, - VmExecutionResultAndLogs, VmMemoryMetrics, - }, + interface::{TransactionExecutionMetrics, VmEvent, VmExecutionResultAndLogs}, utils::StorageWritesDeduplicator, }; -use zksync_shared_metrics::InteractionType; use zksync_types::H256; use zksync_utils::bytecode::bytecode_len_in_bytes; use crate::utils::ReportFilter; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] -#[metrics(label = "type", rename_all = "snake_case")] -enum SizeType { - Inner, - History, -} - -const MEMORY_SIZE_BUCKETS: Buckets = Buckets::values(&[ - 1_000.0, - 10_000.0, - 100_000.0, - 500_000.0, - 1_000_000.0, - 5_000_000.0, - 10_000_000.0, - 50_000_000.0, - 100_000_000.0, - 500_000_000.0, - 1_000_000_000.0, -]); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_memory")] -struct RuntimeContextMemoryMetrics { - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - event_sink_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - memory_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - decommitter_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - storage_size: Family>, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - storage_view_cache_size: Histogram, - #[metrics(buckets = MEMORY_SIZE_BUCKETS)] - full: Histogram, -} - -#[vise::register] -static MEMORY_METRICS: vise::Global = vise::Global::new(); - -const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_storage_interaction")] -struct RuntimeContextStorageMetrics { - #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] - amount: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration_per_unit: Family>, - #[metrics(buckets = Buckets::ZERO_TO_ONE)] - ratio: Histogram, -} - -#[vise::register] -static STORAGE_METRICS: vise::Global = vise::Global::new(); - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum SandboxStage { @@ -185,84 +122,6 @@ pub(super) struct ExecutionMetrics { #[vise::register] pub(super) static EXECUTION_METRICS: 
vise::Global = vise::Global::new(); -pub(super) fn report_vm_memory_metrics( - tx_id: &str, - memory_metrics: &VmMemoryMetrics, - vm_execution_took: Duration, - storage_metrics: StorageViewMetrics, -) { - MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); - MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); - MEMORY_METRICS.memory_size[&SizeType::Inner].observe(memory_metrics.memory_inner); - MEMORY_METRICS.memory_size[&SizeType::History].observe(memory_metrics.memory_history); - MEMORY_METRICS.decommitter_size[&SizeType::Inner] - .observe(memory_metrics.decommittment_processor_inner); - MEMORY_METRICS.decommitter_size[&SizeType::History] - .observe(memory_metrics.decommittment_processor_history); - MEMORY_METRICS.storage_size[&SizeType::Inner].observe(memory_metrics.storage_inner); - MEMORY_METRICS.storage_size[&SizeType::History].observe(memory_metrics.storage_history); - - MEMORY_METRICS - .storage_view_cache_size - .observe(storage_metrics.cache_size); - MEMORY_METRICS - .full - .observe(memory_metrics.full_size() + storage_metrics.cache_size); - - let total_storage_invocations = storage_metrics.get_value_storage_invocations - + storage_metrics.set_value_storage_invocations; - let total_time_spent_in_storage = - storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; - - STORAGE_METRICS.amount[&InteractionType::Missed] - .observe(storage_metrics.storage_invocations_missed); - STORAGE_METRICS.amount[&InteractionType::GetValue] - .observe(storage_metrics.get_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::SetValue] - .observe(storage_metrics.set_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); - - STORAGE_METRICS.duration[&InteractionType::Missed] - .observe(storage_metrics.time_spent_on_storage_missed); - STORAGE_METRICS.duration[&InteractionType::GetValue] - .observe(storage_metrics.time_spent_on_get_value); - STORAGE_METRICS.duration[&InteractionType::SetValue] - .observe(storage_metrics.time_spent_on_set_value); - STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); - - if total_storage_invocations > 0 { - STORAGE_METRICS.duration_per_unit[&InteractionType::Total] - .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); - } - if storage_metrics.storage_invocations_missed > 0 { - let duration_per_unit = storage_metrics - .time_spent_on_storage_missed - .div_f64(storage_metrics.storage_invocations_missed as f64); - STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); - } - - STORAGE_METRICS - .ratio - .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); - - const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; - - if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - tracing::info!( - "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ - {} get_value_storage_invocations, {} set_value_storage_invocations, \ - vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ - (missed: {:?} get: {:?} set: {:?})", - storage_metrics.storage_invocations_missed, - storage_metrics.get_value_storage_invocations, - storage_metrics.set_value_storage_invocations, - storage_metrics.time_spent_on_storage_missed, - storage_metrics.time_spent_on_get_value, - 
storage_metrics.time_spent_on_set_value, - ); - } -} - pub(super) fn collect_tx_execution_metrics( contracts_deployed: u16, result: &VmExecutionResultAndLogs, diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index f0d96118638b..44eaae2e3eee 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -10,7 +10,10 @@ use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{TransactionExecutionMetrics, TxExecutionMode, VmExecutionResultAndLogs}, + interface::{ + OneshotTracingParams, TransactionExecutionMetrics, TxExecutionArgs, TxExecutionMode, + VmExecutionResultAndLogs, + }, utils::{ adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, get_max_batch_gas_limit, @@ -41,8 +44,8 @@ pub(super) use self::result::SubmitTxError; use self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink}; use crate::{ execution_sandbox::{ - BlockArgs, SubmitTxStage, TransactionExecutor, TxExecutionArgs, TxSetupArgs, - VmConcurrencyBarrier, VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, + BlockArgs, SubmitTxStage, TransactionExecutor, TxSetupArgs, VmConcurrencyBarrier, + VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, }, tx_sender::result::ApiCallResult, }; @@ -396,7 +399,7 @@ impl TxSender { connection, block_args, None, - vec![], + OneshotTracingParams::default(), ) .await?; tracing::info!( @@ -733,7 +736,7 @@ impl TxSender { connection, block_args, state_override, - vec![], + OneshotTracingParams::default(), ) .await?; Ok((execution_output.vm, execution_output.metrics)) @@ -1033,7 +1036,7 @@ impl TxSender { connection, block_args, state_override, - vec![], + OneshotTracingParams::default(), ) .await?; result.vm.into_api_call_result() diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs index 5b2ab0495dab..0ac3eb0b4f38 100644 --- a/core/node/api_server/src/tx_sender/tests.rs +++ b/core/node/api_server/src/tx_sender/tests.rs @@ -9,12 +9,10 @@ use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; use zksync_types::{api, get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::*; -use crate::{ - execution_sandbox::{testonly::MockOneshotExecutor, BlockStartInfo}, - web3::testonly::create_test_tx_sender, -}; +use crate::{execution_sandbox::BlockStartInfo, web3::testonly::create_test_tx_sender}; #[tokio::test] async fn getting_nonce_for_account() { diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index 473391476a3b..ad00f6a878b9 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -1,10 +1,9 @@ -use std::sync::Arc; - use anyhow::Context as _; -use once_cell::sync::OnceCell; use zksync_dal::{CoreDal, DalError}; use zksync_multivm::{ - interface::{Call, CallType, ExecutionResult, TxExecutionMode}, + interface::{ + Call, CallType, ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, + }, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; use zksync_system_constants::MAX_ENCODED_TX_SIZE; @@ -19,7 +18,7 @@ use zksync_types::{ use zksync_web3_decl::error::Web3Error; use crate::{ - 
execution_sandbox::{ApiTracer, TxExecutionArgs, TxSetupArgs}, + execution_sandbox::TxSetupArgs, tx_sender::{ApiContracts, TxSenderConfig}, web3::{backend_jsonrpsee::MethodTracer, state::RpcState}, }; @@ -190,11 +189,8 @@ impl DebugNamespace { let vm_permit = vm_permit.context("cannot acquire VM permit")?; // We don't need properly trace if we only need top call - let call_tracer_result = Arc::new(OnceCell::default()); - let custom_tracers = if only_top_call { - vec![] - } else { - vec![ApiTracer::CallTracer(call_tracer_result.clone())] + let tracing_params = OneshotTracingParams { + trace_calls: !only_top_call, }; let connection = self.state.acquire_connection().await?; @@ -207,12 +203,11 @@ impl DebugNamespace { connection, block_args, None, - custom_tracers, + tracing_params, ) - .await? - .vm; + .await?; - let (output, revert_reason) = match result.result { + let (output, revert_reason) = match result.vm.result { ExecutionResult::Success { output, .. } => (output, None), ExecutionResult::Revert { output } => (vec![], Some(output.to_string())), ExecutionResult::Halt { reason } => { @@ -223,19 +218,14 @@ impl DebugNamespace { } }; - // We had only one copy of Arc this arc is already dropped it's safe to unwrap - let trace = Arc::try_unwrap(call_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); let call = Call::new_high_level( tx.common_data.fee.gas_limit.as_u64(), - result.statistics.gas_used, + result.vm.statistics.gas_used, tx.execute.value, tx.execute.calldata, output, revert_reason, - trace, + result.call_traces, ); Ok(Self::map_call(call, false)) } diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index 9f6b30b6026e..a77498d4341d 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -14,12 +14,10 @@ use zksync_types::{ fee_model::{BatchFeeInput, FeeParams}, L2ChainId, }; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{ - execution_sandbox::{testonly::MockOneshotExecutor, TransactionExecutor}, - tx_sender::TxSenderConfig, -}; +use crate::{execution_sandbox::TransactionExecutor, tx_sender::TxSenderConfig}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 5617b097c0c1..635620e9c525 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -42,6 +42,7 @@ use zksync_types::{ U256, U64, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, jsonrpsee::{ @@ -57,10 +58,7 @@ use zksync_web3_decl::{ }; use super::*; -use crate::{ - execution_sandbox::testonly::MockOneshotExecutor, - web3::testonly::{spawn_http_server, spawn_ws_server}, -}; +use crate::web3::testonly::{spawn_http_server, spawn_ws_server}; mod debug; mod filters; diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index 5b04250eebf4..d8d1a2c7768e 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -11,6 +11,7 @@ use zksync_types::{ L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, U256, }; use zksync_utils::u256_to_h256; +use zksync_vm_executor::oneshot::MockOneshotExecutor; use 
zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -327,7 +328,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { total_log_queries_count: 0, }; - tx_executor.set_tx_responses_with_logs(move |tx, env| { + tx_executor.set_full_tx_responses(move |tx, env| { assert_eq!(tx.hash(), tx_bytes_and_hash.1); assert_eq!(env.l1_batch.first_l2_block.number, 1); diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index ba52892584d2..707bd957d810 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -11,7 +11,6 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_basic_types.workspace = true zksync_config.workspace = true zksync_concurrency.workspace = true zksync_consensus_crypto.workspace = true @@ -35,9 +34,7 @@ zksync_utils.workspace = true zksync_web3_decl.workspace = true zksync_node_api_server.workspace = true zksync_state.workspace = true -zksync_storage.workspace = true zksync_vm_interface.workspace = true -zksync_multivm.workspace = true anyhow.workspace = true async-trait.workspace = true secrecy.workspace = true @@ -46,7 +43,6 @@ thiserror.workspace = true tracing.workspace = true hex.workspace = true tokio.workspace = true -jsonrpsee.workspace = true semver.workspace = true [dev-dependencies] diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index e1f10b8e4e50..cf7e4173e8dc 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use anyhow::Context as _; -use jsonrpsee::{core::ClientError, types::error::ErrorCode}; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; @@ -12,6 +11,7 @@ use zksync_types::L2BlockNumber; use zksync_web3_decl::{ client::{DynClient, L2}, error::is_retriable, + jsonrpsee::{core::ClientError, types::error::ErrorCode}, namespaces::{EnNamespaceClient as _, EthNamespaceClient as _}, }; diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index f7f14ad8fe0a..11b6b5c67e3b 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,9 +1,8 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::attester; -use zksync_multivm::interface::TxExecutionMode; use zksync_node_api_server::{ - execution_sandbox::{TransactionExecutor, TxExecutionArgs, TxSetupArgs, VmConcurrencyLimiter}, + execution_sandbox::{TransactionExecutor, TxSetupArgs, VmConcurrencyLimiter}, tx_sender::MultiVMBaseSystemContracts, }; use zksync_state::PostgresStorageCaches; @@ -11,7 +10,9 @@ use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, }; -use zksync_vm_interface::ExecutionResult; +use zksync_vm_interface::{ + ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, +}; use crate::{abi, storage::ConnectionPool}; @@ -46,6 +47,7 @@ impl VM { } } + // FIXME (PLA-1018): switch to oneshot executor pub async fn call( &self, ctx: &ctx::Ctx, @@ -82,7 +84,7 @@ impl VM { conn.0, args, None, - vec![], + OneshotTracingParams::default(), )) .await? 
.context("execute_tx_in_sandbox()")?;

From 52f4f763674d25f8a5e7f3a111354a559f798d52 Mon Sep 17 00:00:00 2001
From: Alex Ostrovski
Date: Tue, 10 Sep 2024 17:17:30 +0300
Subject: [PATCH 048/116] fix(en): Fix connection starvation during snapshot
 recovery (#2836)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Fixes DB connection starvation during snapshot recovery, caused by an insufficient number of connections in the DB pool provided to the recovery logic.
- Additionally, fixes the max concurrency of recovery not being set.

## Why ❔

Connection starvation errors degrade UX.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
---
 .../external_node_strategy.rs                 |  8 +-
 .../src/external_node/snapshot_recovery.rs    | 78 ++++++++++++++++++-
 2 files changed, 79 insertions(+), 7 deletions(-)

diff --git a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
index 317f0b197d83..bdd69214de9a 100644
--- a/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
+++ b/core/node/node_framework/src/implementations/layers/node_storage_init/external_node_strategy.rs
@@ -76,16 +76,18 @@ impl WiringLayer for ExternalNodeInitStrategyLayer {
         });
         let snapshot_recovery = match self.snapshot_recovery_config {
             Some(recovery_config) => {
+                // Add a connection for checking whether the storage is initialized.
                 let recovery_pool = input
                     .master_pool
-                    .get_custom(self.max_postgres_concurrency.get() as u32)
+                    .get_custom(self.max_postgres_concurrency.get() as u32 + 1)
                     .await?;
-                let recovery = Arc::new(ExternalNodeSnapshotRecovery {
+                let recovery: Arc<dyn InitializeStorage> = Arc::new(ExternalNodeSnapshotRecovery {
                     client: client.clone(),
                     pool: recovery_pool,
+                    max_concurrency: self.max_postgres_concurrency,
                     recovery_config,
                     app_health,
-                }) as Arc<dyn InitializeStorage>;
+                });
                 Some(recovery)
             }
             None => None,
diff --git a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
index d9ba60a1bcbf..9bc065b939cc 100644
--- a/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
+++ b/core/node/node_storage_init/src/external_node/snapshot_recovery.rs
@@ -1,4 +1,4 @@
-use std::{sync::Arc, time::Instant};
+use std::{num::NonZeroUsize, sync::Arc, time::Instant};
 
 use anyhow::Context as _;
 use tokio::sync::watch;
@@ -17,6 +17,7 @@ use crate::{InitializeStorage, SnapshotRecoveryConfig};
 pub struct ExternalNodeSnapshotRecovery {
     pub client: Box<DynClient<L2>>,
     pub pool: ConnectionPool<Core>,
+    pub max_concurrency: NonZeroUsize,
     pub recovery_config: SnapshotRecoveryConfig,
     pub app_health: Arc<AppHealthCheck>,
 }
@@ -24,8 +25,17 @@ pub struct ExternalNodeSnapshotRecovery {
 #[async_trait::async_trait]
 impl InitializeStorage for ExternalNodeSnapshotRecovery {
     async fn initialize_storage(&self, stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> {
-        let pool = self.pool.clone();
         tracing::warn!("Proceeding with snapshot recovery.
This is an experimental feature; use at your own risk");
+
+        let pool_size = self.pool.max_size() as usize;
+        if pool_size < self.max_concurrency.get() + 1 {
+            tracing::error!(
+                "Connection pool has an insufficient number of connections ({pool_size} vs concurrency {} + 1 connection for checks). \
+                 This will likely lead to pool starvation during recovery.",
+                self.max_concurrency
+            );
+        }
+
         let object_store_config =
             self.recovery_config.object_store_config.clone().context(
                 "Snapshot object store must be presented if snapshot recovery is activated",
            )?;
@@ -34,10 +44,13 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery {
             .create_store()
             .await?;
 
-        let config = SnapshotsApplierConfig::default();
+        let config = SnapshotsApplierConfig {
+            max_concurrency: self.max_concurrency,
+            ..SnapshotsApplierConfig::default()
+        };
         let mut snapshots_applier_task = SnapshotsApplierTask::new(
             config,
-            pool,
+            self.pool.clone(),
             Box::new(self.client.clone().for_component("snapshot_recovery")),
             object_store,
         );
@@ -80,3 +93,60 @@ impl InitializeStorage for ExternalNodeSnapshotRecovery {
         Ok(completed)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use std::future;
+
+    use zksync_types::{
+        tokens::{TokenInfo, TokenMetadata},
+        Address, L2BlockNumber,
+    };
+    use zksync_web3_decl::client::MockClient;
+
+    use super::*;
+
+    #[tokio::test]
+    async fn recovery_does_not_starve_pool_connections() {
+        let pool = ConnectionPool::constrained_test_pool(5).await;
+        let app_health = Arc::new(AppHealthCheck::new(None, None));
+        let client = MockClient::builder(L2::default())
+            .method("en_syncTokens", |_number: Option<L2BlockNumber>| {
+                Ok(vec![TokenInfo {
+                    l1_address: Address::repeat_byte(1),
+                    l2_address: Address::repeat_byte(2),
+                    metadata: TokenMetadata {
+                        name: "test".to_string(),
+                        symbol: "TEST".to_string(),
+                        decimals: 18,
+                    },
+                }])
+            })
+            .build();
+        let recovery = ExternalNodeSnapshotRecovery {
+            client: Box::new(client),
+            pool,
+            max_concurrency: NonZeroUsize::new(4).unwrap(),
+            recovery_config: SnapshotRecoveryConfig {
+                snapshot_l1_batch_override: None,
+                drop_storage_key_preimages: false,
+                object_store_config: None,
+            },
+            app_health,
+        };
+
+        // Emulate recovery by indefinitely holding onto `max_concurrency` connections. In practice,
+        // the snapshot applier will release connections eventually, but it may require more time than the connection
+        // acquisition timeout configured for the DB pool.
+        for _ in 0..recovery.max_concurrency.get() {
+            let connection = recovery.pool.connection().await.unwrap();
+            tokio::spawn(async move {
+                future::pending::<()>().await;
+                drop(connection);
+            });
+        }
+
+        // The only token reported by the mock client isn't recovered.
+        assert!(!recovery.is_initialized().await.unwrap());
+    }
+}

From 2d71c7408a0eed3662fc51f70fa9f525d66e4c6f Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Tue, 10 Sep 2024 16:26:30 +0200
Subject: [PATCH 049/116] fix: handling of HTTP 403 thrown by proxyd (#2835)

When a method is missing, proxyd returns HTTP 403 with `methodnotfound` in the body, while the api server returns HTTP 200 with `methodnotfound`; we need to handle both.
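In sketch form, the fix treats both shapes as "method not found" and falls back to the legacy `consensus_genesis()` call. This is a condensed view of the diff below, not additional code; all names come from the diff:

```rust
// Condensed sketch of the fallback in `fetch_global_config()`; see the diff below.
match ctx.wait(self.client.consensus_global_config()).await? {
    // Happy path: the main node exposes the new endpoint.
    Ok(cfg) => { /* deserialize and return the global config */ }
    // proxyd: HTTP 403 surfaces as a transport error; `ClientError` doesn't expose
    // HTTP status codes, so any transport error falls through to the legacy call.
    Err(ClientError::Transport(_)) => { /* fall through */ }
    // api server: HTTP 200 with MethodNotFound in the body surfaces as a call error.
    Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => { /* fall through */ }
    // Anything else is a genuine error.
    Err(err) => { /* return the error */ }
}
// Fall-through: fetch the legacy genesis via `consensus_genesis()`.
```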
---
 core/node/consensus/src/en.rs | 38 +++++++++++++++++------------------
 1 file changed, 19 insertions(+), 19 deletions(-)

diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs
index cf7e4173e8dc..a52393c0f488 100644
--- a/core/node/consensus/src/en.rs
+++ b/core/node/consensus/src/en.rs
@@ -57,7 +57,7 @@ impl EN {
         let global_config = self
             .fetch_global_config(ctx)
             .await
-            .wrap("fetch_genesis()")?;
+            .wrap("fetch_global_config()")?;
         let mut conn = self.pool.connection(ctx).await.wrap("connection()")?;
 
         conn.try_update_global_config(ctx, &global_config)
@@ -90,7 +90,7 @@ impl EN {
                     if let Ok(new) = self.fetch_global_config(ctx).await {
                         if new != old {
                             return Err(anyhow::format_err!(
-                                "genesis changed: old {old:?}, new {new:?}"
+                                "global config changed: old {old:?}, new {new:?}"
                             )
                             .into());
                         }
@@ -282,29 +282,29 @@ impl EN {
         match ctx.wait(self.client.consensus_global_config()).await? {
             Ok(cfg) => {
                 let cfg = cfg.context("main node is not running consensus component")?;
-                Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?)
-            }
-            Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {
-                tracing::info!(
-                    "consensus_global_config() not found, calling consensus_genesis() instead"
-                );
-                let genesis = ctx
-                    .wait(self.client.consensus_genesis())
-                    .await?
-                    .context("consensus_genesis()")?
-                    .context("main node is not running consensus component")?;
-                Ok(consensus_dal::GlobalConfig {
-                    genesis: zksync_protobuf::serde::deserialize(&genesis.0)
-                        .context("deserialize()")?,
-                    registry_address: None,
-                })
+                return Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?);
             }
+            // For non-whitelisted methods, proxyd returns HTTP 403 with MethodNotFound in the body.
+            // For some stupid reason ClientError doesn't expose HTTP error codes.
+            Err(ClientError::Transport(_)) => {}
+            // For missing methods, the api server returns HTTP 200 with MethodNotFound in the body.
+            Err(ClientError::Call(err)) if err.code() == ErrorCode::MethodNotFound.code() => {}
             Err(err) => {
                 return Err(err)
                     .context("consensus_global_config()")
-                    .map_err(|err| err.into())
+                    .map_err(|err| err.into());
             }
         }
+        tracing::info!("consensus_global_config() not found, calling consensus_genesis() instead");
+        let genesis = ctx
+            .wait(self.client.consensus_genesis())
+            .await?
+            .context("consensus_genesis()")?
+            .context("main node is not running consensus component")?;
+        Ok(consensus_dal::GlobalConfig {
+            genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?,
+            registry_address: None,
+        })
     }
 
     #[tracing::instrument(skip_all)]

From 101a6853999f480d52a447217677be2d7473f5f6 Mon Sep 17 00:00:00 2001
From: Grzegorz Prusak
Date: Tue, 10 Sep 2024 17:29:27 +0200
Subject: [PATCH 050/116] chore: log the whole error message when the task
 fails (#2834)

"{err}" is not useful because it truncates the stack trace of anyhow errors.
---
 core/node/node_framework/src/service/mod.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs
index 9e3555f22c21..b6d420093541 100644
--- a/core/node/node_framework/src/service/mod.rs
+++ b/core/node/node_framework/src/service/mod.rs
@@ -200,7 +200,7 @@ impl ZkStackService {
         // Report all the errors we've met during the init.
         if !errors.is_empty() {
             for (layer, error) in &errors {
-                tracing::error!("Wiring layer {layer} can't be initialized: {error}");
+                tracing::error!("Wiring layer {layer} can't be initialized: {error:?}");
             }
             return Err(ZkStackServiceError::Wiring(errors));
         }
@@ -302,7 +302,7 @@ impl ZkStackService {
                 tracing::info!("Shutdown hook {name} completed");
             }
             Ok(Err(err)) => {
-                tracing::error!("Shutdown hook {name} failed: {err}");
+                tracing::error!("Shutdown hook {name} failed: {err:?}");
                 self.errors.push(TaskError::ShutdownHookFailed(name, err));
             }
             Err(_) => {
@@ -324,7 +324,7 @@ impl ZkStackService {
                 tracing::info!("Task {task_name} finished");
             }
             Ok(Err(err)) => {
-                tracing::error!("Task {task_name} failed: {err}");
+                tracing::error!("Task {task_name} failed: {err:?}");
                 self.errors.push(TaskError::TaskFailed(task_name, err));
             }
             Err(panic_err) => {

From 57f56fb87a5899ddb2e82f4cfe4d182bdba496cf Mon Sep 17 00:00:00 2001
From: Joonatan Saarhelo
Date: Tue, 10 Sep 2024 17:01:41 +0100
Subject: [PATCH 051/116] feat: allow specifying what tests to run with zks
 (#2841)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

`zks t i` now accepts a `-t` flag that can be used to specify a pattern. Only matching tests are run.
---
 .../src/commands/test/args/integration.rs          |  4 +++-
 .../zk_supervisor/src/commands/test/integration.rs | 10 +++++++---
 zk_toolbox/crates/zk_supervisor/src/messages.rs    |  2 ++
 3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
index 292c7d7d7154..435dddfc360c 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs
@@ -1,7 +1,7 @@
 use clap::Parser;
 use serde::{Deserialize, Serialize};
 
-use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP};
+use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP};
 
 #[derive(Debug, Serialize, Deserialize, Parser)]
 pub struct IntegrationArgs {
@@ -9,4 +9,6 @@ pub struct IntegrationArgs {
     pub external_node: bool,
     #[clap(short, long, help = MSG_NO_DEPS_HELP)]
     pub no_deps: bool,
+    #[clap(short, long, help = MSG_TEST_PATTERN_HELP)]
+    pub test_pattern: Option<String>,
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index e1ec932ca7f9..fb3e1436acc3 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -39,9 +39,13 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
         .init_test_wallet(&ecosystem_config, &chain_config)
         .await?;
 
-    let mut command = cmd!(shell, "yarn jest --forceExit --testTimeout 120000")
-        .env("CHAIN_NAME", ecosystem_config.current_chain())
-        .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
+    let test_pattern = args.test_pattern;
+    let mut command = cmd!(
+        shell,
+        "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}"
+    )
+    .env("CHAIN_NAME", ecosystem_config.current_chain())
+    .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?);
 
     if args.external_node {
         command = command.env("EXTERNAL_NODE", format!("{:?}", args.external_node))
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index
ff9cc104a505..d64e87cd0eb4 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -92,6 +92,8 @@ pub(super) const MSG_TEST_RUST_OPTIONS_HELP: &str = "Cargo test flags"; pub(super) const MSG_BUILD_ABOUT: &str = "Build all test dependencies"; pub(super) const MSG_TESTS_EXTERNAL_NODE_HELP: &str = "Run tests for external node"; pub(super) const MSG_NO_DEPS_HELP: &str = "Do not install or build dependencies"; +pub(super) const MSG_TEST_PATTERN_HELP: &str = + "Run just the tests matching a pattern. Same as the -t flag on jest."; pub(super) const MSG_NO_KILL_HELP: &str = "The test will not kill all the nodes during execution"; pub(super) const MSG_TESTS_RECOVERY_SNAPSHOT_HELP: &str = "Run recovery from a snapshot instead of genesis"; From 946877f98d0448938a9c6030b0986346e5d93218 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 10 Sep 2024 19:41:51 +0300 Subject: [PATCH 052/116] chore: clean up dependencies (#2839) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Resolves https://github.com/matter-labs/zksync-era/issues/2783 ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 19 --- core/bin/external_node/Cargo.toml | 1 - core/lib/circuit_breaker/Cargo.toml | 3 - core/lib/contract_verifier/Cargo.toml | 1 - core/lib/da_client/Cargo.toml | 4 - core/lib/db_connection/Cargo.toml | 1 - core/lib/tee_verifier/Cargo.toml | 3 - core/lib/utils/Cargo.toml | 1 - core/lib/zksync_core_leftovers/Cargo.toml | 2 - core/node/consistency_checker/Cargo.toml | 1 - core/node/db_pruner/Cargo.toml | 1 - core/node/fee_model/Cargo.toml | 1 - core/node/node_framework/Cargo.toml | 2 - core/node/proof_data_handler/Cargo.toml | 1 - prover/Cargo.lock | 165 ---------------------- 15 files changed, 206 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ff1e44348b68..b07724e23fc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8143,7 +8143,6 @@ name = "zksync_circuit_breaker" version = "0.1.0" dependencies = [ "anyhow", - "assert_matches", "async-trait", "thiserror", "tokio", @@ -8398,7 +8397,6 @@ dependencies = [ "zksync_contracts", "zksync_dal", "zksync_eth_client", - "zksync_eth_sender", "zksync_health_check", "zksync_l1_contract_interface", "zksync_node_genesis", @@ -8465,7 +8463,6 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_dal", - "zksync_env_config", "zksync_queued_job_processor", "zksync_types", "zksync_utils", @@ -8493,9 +8490,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", "zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -8524,9 +8519,6 @@ dependencies = [ "anyhow", "async-trait", "serde", - "tracing", - "zksync_config", - "zksync_types", ] [[package]] @@ -8597,7 +8589,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", ] [[package]] @@ -8721,7 +8712,6 @@ dependencies = [ "envy", "futures 0.3.28", "rustc_version", - "semver", "serde", "serde_json", "tempfile", @@ -9103,7 +9093,6 @@ dependencies = [ "chrono", "serde", "serde_json", - "test-casing", "test-log", "tokio", "tracing", @@ -9130,7 +9119,6 @@ dependencies = [ "zksync_config", "zksync_dal", "zksync_eth_client", - 
"zksync_node_test_utils", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -9164,7 +9152,6 @@ dependencies = [ "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", - "zksync_env_config", "zksync_eth_client", "zksync_eth_sender", "zksync_eth_watch", @@ -9183,7 +9170,6 @@ dependencies = [ "zksync_node_sync", "zksync_object_store", "zksync_proof_data_handler", - "zksync_protobuf_config", "zksync_queued_job_processor", "zksync_reorg_detector", "zksync_state", @@ -9343,7 +9329,6 @@ dependencies = [ "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_tee_verifier", "zksync_types", ] @@ -9657,13 +9642,10 @@ dependencies = [ "zksync_config", "zksync_contracts", "zksync_crypto_primitives", - "zksync_dal", - "zksync_db_connection", "zksync_merkle_tree", "zksync_multivm", "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", "zksync_types", "zksync_utils", ] @@ -9746,7 +9728,6 @@ dependencies = [ "bincode", "futures 0.3.28", "hex", - "itertools 0.10.5", "num", "once_cell", "rand 0.8.5", diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index 498b11b279b0..a1d3951ff3d8 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -59,7 +59,6 @@ envy.workspace = true url.workspace = true clap = { workspace = true, features = ["derive"] } serde_json.workspace = true -semver.workspace = true tracing.workspace = true [dev-dependencies] diff --git a/core/lib/circuit_breaker/Cargo.toml b/core/lib/circuit_breaker/Cargo.toml index 9bc00b475d4a..926002e561c0 100644 --- a/core/lib/circuit_breaker/Cargo.toml +++ b/core/lib/circuit_breaker/Cargo.toml @@ -19,6 +19,3 @@ tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true tracing.workspace = true - -[dev-dependencies] -assert_matches.workspace = true diff --git a/core/lib/contract_verifier/Cargo.toml b/core/lib/contract_verifier/Cargo.toml index 2803e3bb4185..580982c9a700 100644 --- a/core/lib/contract_verifier/Cargo.toml +++ b/core/lib/contract_verifier/Cargo.toml @@ -13,7 +13,6 @@ categories.workspace = true [dependencies] zksync_types.workspace = true zksync_dal.workspace = true -zksync_env_config.workspace = true zksync_config.workspace = true zksync_contracts.workspace = true zksync_queued_job_processor.workspace = true diff --git a/core/lib/da_client/Cargo.toml b/core/lib/da_client/Cargo.toml index 589a077d4bf9..a68d715eb574 100644 --- a/core/lib/da_client/Cargo.toml +++ b/core/lib/da_client/Cargo.toml @@ -12,9 +12,5 @@ categories.workspace = true [dependencies] serde = { workspace = true, features = ["derive"] } -tracing.workspace = true async-trait.workspace = true anyhow.workspace = true - -zksync_config.workspace = true -zksync_types.workspace = true diff --git a/core/lib/db_connection/Cargo.toml b/core/lib/db_connection/Cargo.toml index fa5bb0b20af2..fb535d582325 100644 --- a/core/lib/db_connection/Cargo.toml +++ b/core/lib/db_connection/Cargo.toml @@ -12,7 +12,6 @@ categories.workspace = true [dependencies] zksync_basic_types.workspace = true -zksync_health_check.workspace = true serde = { workspace = true, features = ["derive"] } serde_json.workspace = true diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index a56f383bdbad..6828eeef8b10 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -14,12 +14,9 @@ categories.workspace = true zksync_multivm.workspace = true zksync_config.workspace = true 
zksync_crypto_primitives.workspace = true -zksync_dal.workspace = true -zksync_db_connection.workspace = true zksync_merkle_tree.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true zksync_types.workspace = true zksync_utils.workspace = true diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml index 5ec27380df5b..593952f16aca 100644 --- a/core/lib/utils/Cargo.toml +++ b/core/lib/utils/Cargo.toml @@ -25,7 +25,6 @@ thiserror.workspace = true futures.workspace = true hex.workspace = true reqwest = { workspace = true, features = ["blocking"] } -itertools.workspace = true serde_json.workspace = true once_cell.workspace = true diff --git a/core/lib/zksync_core_leftovers/Cargo.toml b/core/lib/zksync_core_leftovers/Cargo.toml index 4eab88234749..6aa6e6a8b43a 100644 --- a/core/lib/zksync_core_leftovers/Cargo.toml +++ b/core/lib/zksync_core_leftovers/Cargo.toml @@ -11,12 +11,10 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_dal.workspace = true zksync_config.workspace = true zksync_protobuf.workspace = true zksync_protobuf_config.workspace = true zksync_env_config.workspace = true -zksync_node_genesis.workspace = true anyhow.workspace = true tokio = { workspace = true, features = ["time"] } diff --git a/core/node/consistency_checker/Cargo.toml b/core/node/consistency_checker/Cargo.toml index 769690b493a4..ed2cbd5bbd79 100644 --- a/core/node/consistency_checker/Cargo.toml +++ b/core/node/consistency_checker/Cargo.toml @@ -14,7 +14,6 @@ categories.workspace = true zksync_contracts.workspace = true zksync_dal.workspace = true zksync_eth_client.workspace = true -zksync_eth_sender.workspace = true zksync_health_check.workspace = true zksync_l1_contract_interface.workspace = true zksync_shared_metrics.workspace = true diff --git a/core/node/db_pruner/Cargo.toml b/core/node/db_pruner/Cargo.toml index eb21e3e476db..98eba1b6c0ef 100644 --- a/core/node/db_pruner/Cargo.toml +++ b/core/node/db_pruner/Cargo.toml @@ -26,7 +26,6 @@ serde_json.workspace = true [dev-dependencies] assert_matches.workspace = true -test-casing.workspace = true test-log.workspace = true zksync_node_genesis.workspace = true diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 09048515e7a0..8760b97d9db3 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -27,4 +27,3 @@ tracing.workspace = true [dev-dependencies] test-casing.workspace = true -zksync_node_test_utils.workspace = true diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index d5b19a1d4b01..2288c0ddbe8f 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -18,7 +18,6 @@ zksync_health_check.workspace = true zksync_dal.workspace = true zksync_db_connection.workspace = true zksync_config.workspace = true -zksync_protobuf_config.workspace = true zksync_state.workspace = true zksync_object_store.workspace = true zksync_storage.workspace = true @@ -67,7 +66,6 @@ ctrlc.workspace = true semver.workspace = true [dev-dependencies] -zksync_env_config.workspace = true assert_matches.workspace = true # For running UI tests for proc macro trybuild.workspace = true diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 31a0e8437ba5..82063b23fdb5 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -16,7 +16,6 @@ 
zksync_config.workspace = true zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true -zksync_tee_verifier.workspace = true zksync_types.workspace = true anyhow.workspace = true axum.workspace = true diff --git a/prover/Cargo.lock b/prover/Cargo.lock index c3cfada3a1a9..e77bb4f488bb 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -500,27 +500,6 @@ dependencies = [ "which", ] -[[package]] -name = "bindgen" -version = "0.65.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "lazy_static", - "lazycell", - "peeking_take_while", - "prettyplease", - "proc-macro2 1.0.85", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.66", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -826,17 +805,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.1.14" @@ -3238,12 +3206,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" -[[package]] -name = "leb128" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" - [[package]] name = "libc" version = "0.2.155" @@ -3266,22 +3228,6 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" -[[package]] -name = "librocksdb-sys" -version = "0.11.0+8.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" -dependencies = [ - "bindgen 0.65.1", - "bzip2-sys", - "cc", - "glob", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3293,17 +3239,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.18" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c15da26e5af7e25c90b37a2d75cdbf940cf4a55316de9d84c679c9b8bfabf82e" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linux-raw-sys" version = "0.4.14" @@ -3370,16 +3305,6 @@ dependencies = [ "logos-codegen", ] -[[package]] -name = "lz4-sys" -version = "1.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "match_cfg" version = "0.1.0" @@ -4957,16 +4882,6 @@ dependencies = [ "rustc-hex", ] -[[package]] -name = "rocksdb" -version = "0.21.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" -dependencies = [ - "libc", - "librocksdb-sys", -] - [[package]] name = "rsa" version = "0.9.6" @@ -7754,9 +7669,7 @@ dependencies = [ "serde_yaml", "tokio", "zksync_config", - "zksync_dal", 
"zksync_env_config", - "zksync_node_genesis", "zksync_protobuf", "zksync_protobuf_config", ] @@ -7824,7 +7737,6 @@ dependencies = [ "tracing", "vise", "zksync_basic_types", - "zksync_health_check", ] [[package]] @@ -7865,20 +7777,6 @@ dependencies = [ "zksync_types", ] -[[package]] -name = "zksync_health_check" -version = "0.1.0" -dependencies = [ - "async-trait", - "futures 0.3.30", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "vise", -] - [[package]] name = "zksync_kzg" version = "0.150.4" @@ -7896,25 +7794,6 @@ dependencies = [ "zkevm_circuits 0.150.4", ] -[[package]] -name = "zksync_merkle_tree" -version = "0.1.0" -dependencies = [ - "anyhow", - "leb128", - "once_cell", - "rayon", - "thiserror", - "thread_local", - "tracing", - "vise", - "zksync_crypto_primitives", - "zksync_prover_interface", - "zksync_storage", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_mini_merkle_tree" version = "0.1.0" @@ -7954,27 +7833,6 @@ dependencies = [ "zksync_vm_interface", ] -[[package]] -name = "zksync_node_genesis" -version = "0.1.0" -dependencies = [ - "anyhow", - "itertools 0.10.5", - "thiserror", - "tokio", - "tracing", - "vise", - "zksync_config", - "zksync_contracts", - "zksync_dal", - "zksync_eth_client", - "zksync_merkle_tree", - "zksync_multivm", - "zksync_system_constants", - "zksync_types", - "zksync_utils", -] - [[package]] name = "zksync_object_store" version = "0.1.0" @@ -8252,18 +8110,6 @@ dependencies = [ "zksync_utils", ] -[[package]] -name = "zksync_storage" -version = "0.1.0" -dependencies = [ - "num_cpus", - "once_cell", - "rocksdb", - "thread_local", - "tracing", - "vise", -] - [[package]] name = "zksync_system_constants" version = "0.1.0" @@ -8314,7 +8160,6 @@ dependencies = [ "bigdecimal", "futures 0.3.30", "hex", - "itertools 0.10.5", "num", "once_cell", "reqwest 0.12.5", @@ -8471,13 +8316,3 @@ dependencies = [ "zksync_utils", "zksync_vlog", ] - -[[package]] -name = "zstd-sys" -version = "2.0.10+zstd.1.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" -dependencies = [ - "cc", - "pkg-config", -] From d2560928cc67b40a97a5497ac8542915bf6f91a9 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Wed, 11 Sep 2024 09:12:18 +0400 Subject: [PATCH 053/116] feat(utils): Rework locate_workspace, introduce Workspace type (#2830) ## What - Removes `locate_workspace` and `workspace_dir_or_current_dir` methods. - Instead, introduces `Workspace` type that is aware of different Cargo workspaces in the codebase. ## Why The approach with a single `locate_workspace` doesn't work well for our codebase, since we have multiple workspaces. It resulted in some very implicit and convoluted code (see the removed `get_base_dir` in prover workspace). New approach handles all 3 workspaces _plus_ the lack of a workspace. 
--- Cargo.lock | 1 + core/bin/contract-verifier/src/main.rs | 4 +- .../system-constants-generator/src/main.rs | 4 +- core/lib/contract_verifier/src/lib.rs | 8 +- core/lib/contracts/src/lib.rs | 6 +- core/lib/utils/Cargo.toml | 1 + core/lib/utils/src/env.rs | 188 +++++++++++++++--- core/lib/utils/src/lib.rs | 4 +- core/tests/loadnext/src/config.rs | 4 +- core/tests/loadnext/src/fs_utils.rs | 4 +- prover/Cargo.lock | 1 + .../crates/bin/prover_cli/src/config/mod.rs | 4 +- prover/crates/bin/prover_cli/src/helper.rs | 16 +- .../Cargo.toml | 1 + .../src/vk_commitment_helper.rs | 6 +- prover/crates/lib/keystore/src/keystore.rs | 55 ++--- prover/crates/lib/keystore/src/utils.rs | 17 +- 17 files changed, 223 insertions(+), 101 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b07724e23fc7..b98d343564b8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9724,6 +9724,7 @@ name = "zksync_utils" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "bigdecimal", "bincode", "futures 0.3.28", diff --git a/core/bin/contract-verifier/src/main.rs b/core/bin/contract-verifier/src/main.rs index 36640049e446..a8162de13e9d 100644 --- a/core/bin/contract-verifier/src/main.rs +++ b/core/bin/contract-verifier/src/main.rs @@ -9,14 +9,14 @@ use zksync_contract_verifier_lib::ContractVerifier; use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_queued_job_processor::JobProcessor; -use zksync_utils::{wait_for_tasks::ManagedTasks, workspace_dir_or_current_dir}; +use zksync_utils::{env::Workspace, wait_for_tasks::ManagedTasks}; use zksync_vlog::prometheus::PrometheusExporterConfig; async fn update_compiler_versions(connection_pool: &ConnectionPool) { let mut storage = connection_pool.connection().await.unwrap(); let mut transaction = storage.start_transaction().await.unwrap(); - let zksync_home = workspace_dir_or_current_dir(); + let zksync_home = Workspace::locate().core(); let zksolc_path = zksync_home.join("etc/zksolc-bin/"); let zksolc_versions: Vec = std::fs::read_dir(zksolc_path) diff --git a/core/bin/system-constants-generator/src/main.rs b/core/bin/system-constants-generator/src/main.rs index 7ada47302248..cc2e031106b8 100644 --- a/core/bin/system-constants-generator/src/main.rs +++ b/core/bin/system-constants-generator/src/main.rs @@ -17,7 +17,7 @@ use zksync_types::{ IntrinsicSystemGasConstants, ProtocolVersionId, GUARANTEED_PUBDATA_IN_TX, L1_GAS_PER_PUBDATA_BYTE, MAX_NEW_FACTORY_DEPS, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, }; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; // For configs we will use the default value of `800_000` to represent the rough amount of L1 gas // needed to cover the batch expenses. 
@@ -210,7 +210,7 @@ fn generate_rust_fee_constants(intrinsic_gas_constants: &IntrinsicSystemGasConst
 }
 
 fn save_file(path_in_repo: &str, content: String) {
-    let zksync_home = workspace_dir_or_current_dir();
+    let zksync_home = Workspace::locate().core();
     let fee_constants_path = zksync_home.join(path_in_repo);
 
     fs::write(fee_constants_path, content)
diff --git a/core/lib/contract_verifier/src/lib.rs b/core/lib/contract_verifier/src/lib.rs
index 82751d4c9754..c8d9b89d834c 100644
--- a/core/lib/contract_verifier/src/lib.rs
+++ b/core/lib/contract_verifier/src/lib.rs
@@ -1,6 +1,6 @@
 use std::{
     collections::HashMap,
-    path::Path,
+    path::{Path, PathBuf},
     time::{Duration, Instant},
 };
 
@@ -20,7 +20,7 @@ use zksync_types::{
     },
     Address,
 };
-use zksync_utils::workspace_dir_or_current_dir;
+use zksync_utils::env::Workspace;
 
 use crate::{
     error::ContractVerifierError,
@@ -38,8 +38,8 @@ lazy_static! {
     static ref DEPLOYER_CONTRACT: Contract = zksync_contracts::deployer_contract();
 }
 
-fn home_path() -> &'static Path {
-    workspace_dir_or_current_dir()
+fn home_path() -> PathBuf {
+    Workspace::locate().core()
 }
 
 #[derive(Debug)]
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index f10e557a642d..f57649c9d695 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -16,7 +16,7 @@ use ethabi::{
 };
 use once_cell::sync::Lazy;
 use serde::{Deserialize, Serialize};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, workspace_dir_or_current_dir};
+use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, env::Workspace};
 
 pub mod test_contracts;
 
@@ -64,8 +64,8 @@ const LOADNEXT_CONTRACT_FILE: &str =
 const LOADNEXT_SIMPLE_CONTRACT_FILE: &str =
     "etc/contracts-test-data/artifacts-zk/contracts/loadnext/loadnext_contract.sol/Foo.json";
 
-fn home_path() -> &'static Path {
-    workspace_dir_or_current_dir()
+fn home_path() -> PathBuf {
+    Workspace::locate().core()
 }
 
 fn read_file_to_json_value(path: impl AsRef<Path> + std::fmt::Debug) -> serde_json::Value {
diff --git a/core/lib/utils/Cargo.toml b/core/lib/utils/Cargo.toml
index 593952f16aca..b87b2ad98964 100644
--- a/core/lib/utils/Cargo.toml
+++ b/core/lib/utils/Cargo.toml
@@ -32,3 +32,4 @@ once_cell.workspace = true
 rand.workspace = true
 tokio = { workspace = true, features = ["macros", "rt"] }
 bincode.workspace = true
+assert_matches.workspace = true
diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs
index 0eddc6c2cd64..5ae07caf1486 100644
--- a/core/lib/utils/src/env.rs
+++ b/core/lib/utils/src/env.rs
@@ -8,6 +8,87 @@ use once_cell::sync::OnceCell;
 
 static WORKSPACE: OnceCell<Option<&'static Path>> = OnceCell::new();
 
+/// Represents Cargo workspaces available in the repository.
+#[derive(Debug, Clone, Copy)]
+pub enum Workspace<'a> {
+    /// Workspace was not found.
+    /// Assumes that the code is running in a binary.
+    /// Will use the current directory as a fallback.
+    None,
+    /// Root folder.
+    Core(&'a Path),
+    /// `prover` folder.
+    Prover(&'a Path),
+    /// `toolbox` folder.
+    Toolbox(&'a Path),
+}
+
+impl Workspace<'static> {
+    /// Finds the location of the current workspace. If this code runs inside a workspace,
+    /// it will return the correct folder; if it's a binary (e.g. in a Docker container),
+    /// you have to fall back to another directory.
+    /// The code has been inspired by `insta`:
+    /// `https://github.com/mitsuhiko/insta/blob/master/insta/src/env.rs`
+    pub fn locate() -> Self {
+        // Since `locate_workspace_inner()` should be deterministic, it makes little sense to call
+        // `OnceCell::get_or_try_init()` here; the repeated calls are just as unlikely to succeed as the initial call.
+        // Instead, we store `None` in the `OnceCell` if initialization failed.
+        let path: Option<&'static Path> = WORKSPACE
+            .get_or_init(|| {
+                let result = locate_workspace_inner();
+                // If the workspace is not found, we store `None` in the `OnceCell`.
+                // It doesn't make sense to log it, since in most production cases the workspace
+                // is not present.
+                result.ok()
+            })
+            .as_deref();
+        path.map_or(Self::None, Self::from)
+    }
+}
+
+impl<'a> Workspace<'a> {
+    const PROVER_DIRECTORY_NAME: &'static str = "prover";
+    const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox";
+
+    /// Returns the path of the core workspace.
+    /// For `Workspace::None`, considers the current directory to represent the core workspace.
+    pub fn core(self) -> PathBuf {
+        match self {
+            Self::None => PathBuf::from("."),
+            Self::Core(path) => path.into(),
+            Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(),
+        }
+    }
+
+    /// Returns the path of the `prover` workspace.
+    pub fn prover(self) -> PathBuf {
+        match self {
+            Self::Prover(path) => path.into(),
+            _ => self.core().join(Self::PROVER_DIRECTORY_NAME),
+        }
+    }
+
+    /// Returns the path of the `zk_toolbox` workspace.
+    pub fn toolbox(self) -> PathBuf {
+        match self {
+            Self::Toolbox(path) => path.into(),
+            _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME),
+        }
+    }
+}
+
+impl<'a> From<&'a Path> for Workspace<'a> {
+    fn from(path: &'a Path) -> Self {
+        if path.ends_with(Self::PROVER_DIRECTORY_NAME) {
+            Self::Prover(path)
+        } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) {
+            Self::Toolbox(path)
+        } else {
+            Self::Core(path)
+        }
+    }
+}
+
 fn locate_workspace_inner() -> anyhow::Result<PathBuf> {
     let output = std::process::Command::new(
         std::env::var("CARGO")
@@ -40,31 +121,86 @@ fn locate_workspace_inner() -> anyhow::Result<PathBuf> {
         .to_path_buf())
 }
 
-/// Find the location of the current workspace, if this code works in workspace
-/// then it will return the correct folder if, it's binary e.g. in docker container
-/// you have to use fallback to another directory
-/// The code has been inspired by `insta`
-/// `https://github.com/mitsuhiko/insta/blob/master/insta/src/env.rs`
-pub fn locate_workspace() -> Option<&'static Path> {
-    // Since `locate_workspace_inner()` should be deterministic, it makes little sense to call
-    // `OnceCell::get_or_try_init()` here; the repeated calls are just as unlikely to succeed as the initial call.
-    // Instead, we store `None` in the `OnceCell` if initialization failed.
-    WORKSPACE
-        .get_or_init(|| {
-            let result = locate_workspace_inner();
-            if result.is_err() {
-                // `get_or_init()` is guaranteed to call the provided closure once per `OnceCell`;
-                // i.e., we won't spam logs here.
-                tracing::info!(
-                    "locate_workspace() failed. You are using an already compiled version"
-                );
-            }
-            result.ok()
-        })
-        .as_deref()
-}
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+
+    use super::*;
+
+    /// Will reset the pwd on drop.
+    /// This is needed to make sure that even if the test fails, the env
+    /// for other tests is left intact.
+ struct PwdProtector(PathBuf); + + impl PwdProtector { + fn new() -> Self { + let pwd = std::env::current_dir().unwrap(); + Self(pwd) + } + } + + impl Drop for PwdProtector { + fn drop(&mut self) { + std::env::set_current_dir(self.0.clone()).unwrap(); + } + } + + #[test] + fn test_workspace_locate() { + let _pwd_protector = PwdProtector::new(); + + // Core. + + let workspace = Workspace::locate(); + assert_matches!(workspace, Workspace::Core(_)); + let core_path = workspace.core(); + // Check if prover and toolbox directories exist. + assert!(workspace.prover().exists()); + assert_matches!( + Workspace::from(workspace.prover().as_path()), + Workspace::Prover(_) + ); + assert!(workspace.toolbox().exists()); + assert_matches!( + Workspace::from(workspace.toolbox().as_path()), + Workspace::Toolbox(_) + ); + + // Prover. + + // We use `cargo-nextest` for running tests, which runs each test in parallel, + // so we can safely alter the global env, assuming that we will restore it after + // the test. + std::env::set_current_dir(workspace.prover()).unwrap(); + let workspace_path = locate_workspace_inner().unwrap(); + let workspace = Workspace::from(workspace_path.as_path()); + assert_matches!(workspace, Workspace::Prover(_)); + let prover_path = workspace.prover(); + assert_eq!(workspace.core(), core_path); + assert_matches!( + Workspace::from(workspace.core().as_path()), + Workspace::Core(_) + ); + assert!(workspace.toolbox().exists()); + assert_matches!( + Workspace::from(workspace.toolbox().as_path()), + Workspace::Toolbox(_) + ); -/// Returns [`locate_workspace()`] output with the "." fallback. -pub fn workspace_dir_or_current_dir() -> &'static Path { - locate_workspace().unwrap_or_else(|| Path::new(".")) + // Toolbox. + std::env::set_current_dir(workspace.toolbox()).unwrap(); + let workspace_path = locate_workspace_inner().unwrap(); + let workspace = Workspace::from(workspace_path.as_path()); + assert_matches!(workspace, Workspace::Toolbox(_)); + assert_eq!(workspace.core(), core_path); + assert_matches!( + Workspace::from(workspace.core().as_path()), + Workspace::Core(_) + ); + assert_eq!(workspace.prover(), prover_path); + assert_matches!( + Workspace::from(workspace.prover().as_path()), + Workspace::Prover(_) + ); + } } diff --git a/core/lib/utils/src/lib.rs b/core/lib/utils/src/lib.rs index 7f9304e3110c..92a1d7a0c470 100644 --- a/core/lib/utils/src/lib.rs +++ b/core/lib/utils/src/lib.rs @@ -2,7 +2,7 @@ pub mod bytecode; mod convert; -mod env; +pub mod env; pub mod http_with_retries; pub mod misc; pub mod panic_extractor; @@ -10,4 +10,4 @@ mod serde_wrappers; pub mod time; pub mod wait_for_tasks; -pub use self::{convert::*, env::*, misc::*, serde_wrappers::*}; +pub use self::{convert::*, misc::*, serde_wrappers::*}; diff --git a/core/tests/loadnext/src/config.rs b/core/tests/loadnext/src/config.rs index a9648edb00ae..ab578ecfdc6b 100644 --- a/core/tests/loadnext/src/config.rs +++ b/core/tests/loadnext/src/config.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use tokio::sync::Semaphore; use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_types::{network::Network, Address, L2ChainId, H160}; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; use crate::fs_utils::read_tokens; @@ -190,7 +190,7 @@ fn default_main_token() -> H160 { } fn default_test_contracts_path() -> PathBuf { - let test_contracts_path = workspace_dir_or_current_dir().join("etc/contracts-test-data"); + let test_contracts_path = 
Workspace::locate().core().join("etc/contracts-test-data"); tracing::info!("Test contracts path: {}", test_contracts_path.display()); test_contracts_path } diff --git a/core/tests/loadnext/src/fs_utils.rs b/core/tests/loadnext/src/fs_utils.rs index 8af9df8afee7..c4472a00531c 100644 --- a/core/tests/loadnext/src/fs_utils.rs +++ b/core/tests/loadnext/src/fs_utils.rs @@ -5,7 +5,7 @@ use std::{fs::File, io::BufReader, path::Path}; use serde::Deserialize; use zksync_types::{ethabi::Contract, network::Network, Address}; -use zksync_utils::workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; /// A token stored in `etc/tokens/{network}.json` files. #[derive(Debug, Deserialize)] @@ -27,7 +27,7 @@ pub struct TestContract { } pub fn read_tokens(network: Network) -> anyhow::Result> { - let home = workspace_dir_or_current_dir(); + let home = Workspace::locate().core(); let path = home.join(format!("etc/tokens/{network}.json")); let file = File::open(path)?; let reader = BufReader::new(file); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index e77bb4f488bb..21e2ea8b21de 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8189,6 +8189,7 @@ dependencies = [ "zksync_prover_fri_types", "zksync_prover_keystore", "zksync_types", + "zksync_utils", "zksync_vlog", ] diff --git a/prover/crates/bin/prover_cli/src/config/mod.rs b/prover/crates/bin/prover_cli/src/config/mod.rs index 3d99f2be3b2c..b3df2e7d2c56 100644 --- a/prover/crates/bin/prover_cli/src/config/mod.rs +++ b/prover/crates/bin/prover_cli/src/config/mod.rs @@ -1,12 +1,12 @@ use std::{io::Write, path::PathBuf}; -use crate::helper::core_workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; pub fn get_envfile() -> anyhow::Result { if let Ok(envfile) = std::env::var("PLI__CONFIG") { return Ok(envfile.into()); } - Ok(core_workspace_dir_or_current_dir().join("etc/pliconfig")) + Ok(Workspace::locate().core().join("etc/pliconfig")) } pub fn load_envfile(path: impl AsRef) -> anyhow::Result<()> { diff --git a/prover/crates/bin/prover_cli/src/helper.rs b/prover/crates/bin/prover_cli/src/helper.rs index 352a789baed7..7fe0c990e4e0 100644 --- a/prover/crates/bin/prover_cli/src/helper.rs +++ b/prover/crates/bin/prover_cli/src/helper.rs @@ -1,10 +1,7 @@ -use std::{ - fs::File, - path::{Path, PathBuf}, -}; +use std::{fs::File, path::PathBuf}; use zksync_types::ethabi::Contract; -use zksync_utils::locate_workspace; +use zksync_utils::env::Workspace; const ZKSYNC_HYPERCHAIN_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/state-transition/chain-interfaces/IZkSyncHyperchain.sol/IZkSyncHyperchain.json"; @@ -27,8 +24,7 @@ fn read_file_to_json_value(path: &PathBuf) -> serde_json::Value { } fn load_contract_if_present(path: &str) -> Contract { - let home = core_workspace_dir_or_current_dir(); - let path = Path::new(&home).join(path); + let path = Workspace::locate().core().join(path); path.exists() .then(|| { serde_json::from_value(read_file_to_json_value(&path)["abi"].take()).unwrap_or_else( @@ -39,9 +35,3 @@ fn load_contract_if_present(path: &str) -> Contract { panic!("Failed to load contract from {:?}", path); }) } - -pub fn core_workspace_dir_or_current_dir() -> PathBuf { - locate_workspace() - .map(|a| a.join("..")) - .unwrap_or_else(|| PathBuf::from(".")) -} diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml index 7c17e845450c..4830f2277a79 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml +++ 
b/prover/crates/bin/vk_setup_data_generator_server_fri/Cargo.toml @@ -20,6 +20,7 @@ zksync_vlog.workspace = true zksync_types.workspace = true zksync_prover_fri_types.workspace = true zksync_prover_keystore.workspace = true +zksync_utils.workspace = true zkevm_test_harness.workspace = true circuit_definitions = { workspace = true, features = ["log_tracing"] } diff --git a/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs b/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs index 02cbe6e0c4de..2753799dc722 100644 --- a/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs +++ b/prover/crates/bin/vk_setup_data_generator_server_fri/src/vk_commitment_helper.rs @@ -2,7 +2,7 @@ use std::{fs, path::PathBuf}; use anyhow::Context as _; use toml_edit::{Document, Item, Value}; -use zksync_prover_keystore::utils::core_workspace_dir_or_current_dir; +use zksync_utils::env::Workspace; pub fn get_toml_formatted_value(string_value: String) -> Item { let mut value = Value::from(string_value); @@ -23,5 +23,7 @@ pub fn read_contract_toml() -> anyhow::Result { } pub fn get_contract_toml_path() -> PathBuf { - core_workspace_dir_or_current_dir().join("etc/env/base/contracts.toml") + Workspace::locate() + .core() + .join("etc/env/base/contracts.toml") } diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index ff14387bfda7..28ce989287cc 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -18,10 +18,11 @@ use serde::{Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_prover_fri_types::ProverServiceDataKey; +use zksync_utils::env::Workspace; #[cfg(feature = "gpu")] use crate::GoldilocksGpuProverSetupData; -use crate::{utils::core_workspace_dir_or_current_dir, GoldilocksProverSetupData, VkCommitments}; +use crate::{GoldilocksProverSetupData, VkCommitments}; pub enum ProverServiceDataType { VerificationKey, @@ -42,31 +43,6 @@ pub struct Keystore { setup_data_path: PathBuf, } -fn get_base_path() -> PathBuf { - // This will return the path to the _core_ workspace locally, - // otherwise (e.g. in Docker) it will return `.` (which is usually equivalent to `/`). - // - // Note: at the moment of writing this function, it locates the prover workspace, and uses - // `..` to get to the core workspace, so the path returned is something like: - // `/path/to/workspace/zksync-era/prover/..` (or `.` for binaries). - let path = core_workspace_dir_or_current_dir(); - - // Check if we're in the folder equivalent to the core workspace root. - // Path we're actually checking is: - // `/path/to/workspace/zksync-era/prover/../prover/data/keys` - let new_path = path.join("prover/data/keys"); - if new_path.exists() { - return new_path; - } - - let mut components = path.components(); - // This removes the last component of `path`, so: - // for local workspace, we're removing `..` and putting ourselves back to the prover workspace. - // for binaries, we're removing `.` and getting the empty path. - components.next_back().unwrap(); - components.as_path().join("prover/data/keys") -} - impl Keystore { /// Base-dir is the location of smaller keys (like verification keys and finalization hints). /// Setup data path is used for the large setup keys. 
@@ -79,8 +55,33 @@ impl Keystore {
 
     /// Uses automatic detection of the base path, and assumes that setup keys
     /// are stored in the same directory.
+    ///
+    /// The "base" path is considered to be equivalent to the `prover/data/keys`
+    /// directory in the repository.
    pub fn locate() -> Self {
-        let base_path = get_base_path();
+        // There might be several cases:
+        // - We're running from the prover workspace.
+        // - We're running from the core workspace.
+        // - We're running the binary from the docker.
+        let data_dir_path = match Workspace::locate() {
+            Workspace::None => {
+                // We're running a binary, likely in a docker.
+                // Keys can be in one of a few paths.
+                // We want to be very conservative here, checking
+                // more locations than we likely need, so as not to accidentally
+                // break something.
+                let paths = ["./prover/data", "./data", "/prover/data", "/data"];
+                paths.iter().map(PathBuf::from).find(|path| path.exists()).unwrap_or_else(|| {
+                    panic!("Failed to locate the prover data directory. Locations checked: {paths:?}")
+                })
+            }
+            ws => {
+                // If we're running in the Cargo workspace, the data *must* be in `prover/data`.
+                ws.prover().join("data")
+            }
+        };
+        let base_path = data_dir_path.join("keys");
+
         Self {
             basedir: base_path.clone(),
             setup_data_path: base_path,
diff --git a/prover/crates/lib/keystore/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs
index 5cebf7aef77a..d9bb3b47dbb0 100644
--- a/prover/crates/lib/keystore/src/utils.rs
+++ b/prover/crates/lib/keystore/src/utils.rs
@@ -1,5 +1,3 @@
-use std::path::PathBuf;
-
 use anyhow::Context as _;
 use circuit_definitions::{
     circuit_definitions::aux_layer::ZkSyncSnarkWrapperCircuit,
@@ -22,7 +20,6 @@ use zksync_prover_fri_types::circuit_definitions::{
         scheduler::aux::BaseLayerCircuitType,
     },
 };
-use zksync_utils::locate_workspace;
 
 use crate::keystore::Keystore;
 
@@ -115,24 +112,16 @@ pub fn calculate_snark_vk_hash(keystore: &Keystore) -> anyhow::Result<H256> {
     Ok(H256::from_slice(&computed_vk_hash))
 }
 
-/// Returns workspace of the core component, we assume that prover is one folder deeper.
-/// Or fallback to current dir
-pub fn core_workspace_dir_or_current_dir() -> PathBuf {
-    locate_workspace()
-        .map(|a| a.join(".."))
-        .unwrap_or_else(|| PathBuf::from("."))
-}
-
 #[cfg(test)]
 mod tests {
-    use std::{path::PathBuf, str::FromStr};
+    use std::str::FromStr;
 
+    use zksync_utils::env::Workspace;
+
     use super::*;
 
     #[test]
     fn test_keyhash_generation() {
-        let mut path_to_input = PathBuf::from(std::env::var("CARGO_MANIFEST_DIR").unwrap());
-        path_to_input.push("../../../data/historical_data");
+        let path_to_input = Workspace::locate().prover().join("data/historical_data");
 
         for entry in std::fs::read_dir(path_to_input.clone()).unwrap().flatten() {
             if entry.metadata().unwrap().is_dir() {

From 3506731d1702bdec8c6b5b41cabca9a257f0269b Mon Sep 17 00:00:00 2001
From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com>
Date: Wed, 11 Sep 2024 11:11:03 +0300
Subject: [PATCH 054/116] feat(zk_toolbox): `zk_supervisor prover` subcommand
 (#2820)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Add a prover subcommand for `zk_supervisor`. Add the following subcommands (example invocations below):

* `zk_supervisor prover info` - Prints information about the current prover setup.
* `zk_supervisor prover insert-version` - Insert a new protocol version in the prover database (integration with `prover_cli`).
* `zk_supervisor prover insert-batch` - Insert a new batch in the prover database (integration with `prover_cli`).
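For reference, the new subcommands are invoked as follows (illustrative; optional flags are omitted — see the `zk_supervisor` README diff below for the full interface):

```
zk_supervisor prover info
zk_supervisor prover insert-version
zk_supervisor prover insert-batch
```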
Add automatic creation of `prover/artifacts/witness_inputs` dirs if the storage is file backed on init. ## Why ❔ To improve UX of working with provers. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- prover/crates/bin/prover_cli/README.md | 6 ++ zk_toolbox/crates/common/src/lib.rs | 4 +- zk_toolbox/crates/common/src/prerequisites.rs | 10 +- zk_toolbox/crates/config/src/ecosystem.rs | 7 ++ .../src/commands/prover/compressor_keys.rs | 8 +- .../zk_inception/src/commands/prover/gcs.rs | 4 +- .../zk_inception/src/commands/prover/init.rs | 45 +++++++-- .../zk_inception/src/commands/prover/mod.rs | 1 - .../zk_inception/src/commands/prover/run.rs | 7 +- .../src/commands/prover/setup_keys.rs | 8 +- .../zk_inception/src/commands/prover/utils.rs | 10 -- zk_toolbox/crates/zk_supervisor/README.md | 34 ++++++- .../src/commands/database/reset.rs | 4 +- .../crates/zk_supervisor/src/commands/mod.rs | 2 +- .../src/commands/prover/args/insert_batch.rs | 40 ++++++++ .../commands/prover/args/insert_version.rs | 49 ++++++++++ .../src/commands/prover/args/mod.rs | 2 + .../zk_supervisor/src/commands/prover/info.rs | 95 +++++++++++++++++++ .../src/commands/prover/insert_batch.rs | 38 ++++++++ .../src/commands/prover/insert_version.rs | 38 ++++++++ .../zk_supervisor/src/commands/prover/mod.rs | 22 +++++ .../src/commands/prover_version.rs | 40 -------- zk_toolbox/crates/zk_supervisor/src/main.rs | 8 +- 23 files changed, 391 insertions(+), 91 deletions(-) delete mode 100644 zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs delete mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs diff --git a/prover/crates/bin/prover_cli/README.md b/prover/crates/bin/prover_cli/README.md index 2d57e0b56495..e0dd1697bf6d 100644 --- a/prover/crates/bin/prover_cli/README.md +++ b/prover/crates/bin/prover_cli/README.md @@ -9,6 +9,12 @@ git clone git@github.com:matter-labs/zksync-era.git cargo install prover_cli ``` +Or + +``` +cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked prover_cli --force +``` + ## Usage ``` diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index 5a6f63e3a51f..7be4af740700 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -15,8 +15,8 @@ pub mod server; pub mod wallets; pub use prerequisites::{ - check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITES, GPU_PREREQUISITES, - WGET_PREREQUISITES, + check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, + PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, }; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, 
spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zk_toolbox/crates/common/src/prerequisites.rs index 87ec396d0e63..665096d8486e 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zk_toolbox/crates/common/src/prerequisites.rs @@ -45,16 +45,22 @@ pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ }, // CUDA GPU driver ]; -pub const WGET_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { +pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "wget", download_link: "https://www.gnu.org/software/wget/", }]; -pub const GCLOUD_PREREQUISITES: [Prerequisite; 1] = [Prerequisite { +pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "gcloud", download_link: "https://cloud.google.com/sdk/docs/install", }]; +pub const PROVER_CLI_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { + name: "prover_cli", + download_link: + "https://github.com/matter-labs/zksync-era/tree/main/prover/crates/bin/prover_cli", +}]; + pub struct Prerequisite { name: &'static str, download_link: &'static str, diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 76d85bb41e92..a0412fbc4733 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -290,3 +290,10 @@ fn find_file(shell: &Shell, path_buf: PathBuf, file_name: &str) -> Result PathBuf { + let link_to_code = config.link_to_code.clone(); + let mut link_to_prover = link_to_code.into_os_string(); + link_to_prover.push("/prover"); + link_to_prover.into() +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs index 1f39c91a2e2e..fd83fccfebfa 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs @@ -1,11 +1,11 @@ use anyhow::Context; use common::{ - check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITES, + check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITE, }; -use config::{EcosystemConfig, GeneralConfig}; +use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; use xshell::{cmd, Shell}; -use super::{args::compressor_keys::CompressorKeysArgs, utils::get_link_to_prover}; +use super::args::compressor_keys::CompressorKeysArgs; use crate::messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER, MSG_PROOF_COMPRESSOR_CONFIG_NOT_FOUND_ERR, MSG_SETUP_KEY_PATH_ERROR, @@ -37,7 +37,7 @@ pub(crate) fn download_compressor_key( general_config: &mut GeneralConfig, path: &str, ) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITES, false); + check_prerequisites(shell, &WGET_PREREQUISITE, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs index 700209f5ffc8..f28c44504b56 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs @@ -1,4 +1,4 @@ -use common::{check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITES}; +use common::{check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITE}; use 
xshell::{cmd, Shell}; use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; @@ -14,7 +14,7 @@ pub(crate) fn create_gcs_bucket( shell: &Shell, config: ProofStorageGCSCreateBucket, ) -> anyhow::Result { - check_prerequisites(shell, &GCLOUD_PREREQUISITES, false); + check_prerequisites(shell, &GCLOUD_PREREQUISITE, false); let bucket_name = config.bucket_name; let location = config.location; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index c8636381f203..1d92357635c5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -2,13 +2,17 @@ use std::path::PathBuf; use anyhow::Context; use common::{ + cmd::Cmd, config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, logger, spinner::Spinner, }; -use config::{copy_configs, set_prover_database, traits::SaveConfigWithBasePath, EcosystemConfig}; -use xshell::Shell; +use config::{ + copy_configs, get_link_to_prover, set_prover_database, traits::SaveConfigWithBasePath, + EcosystemConfig, +}; +use xshell::{cmd, Shell}; use zksync_config::{configs::object_store::ObjectStoreMode, ObjectStoreConfig}; use super::{ @@ -19,6 +23,7 @@ use super::{ setup_keys, }; use crate::{ + commands::prover::args::init::ProofStorageFileBacked, consts::{PROVER_MIGRATIONS, PROVER_STORE_MAX_RETRIES}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, @@ -105,13 +110,11 @@ fn get_object_store_config( config: Option, ) -> anyhow::Result> { let object_store = match config { - Some(ProofStorageConfig::FileBacked(config)) => Some(ObjectStoreConfig { - mode: ObjectStoreMode::FileBacked { - file_backed_base_path: config.proof_store_dir, - }, - max_retries: PROVER_STORE_MAX_RETRIES, - local_mirror_path: None, - }), + Some(ProofStorageConfig::FileBacked(config)) => Some(init_file_backed_proof_storage( + shell, + &EcosystemConfig::from_file(shell)?, + config, + )?), Some(ProofStorageConfig::GCS(config)) => Some(ObjectStoreConfig { mode: ObjectStoreMode::GCSWithCredentialFile { bucket_base_url: config.bucket_base_url, @@ -154,3 +157,27 @@ async fn initialize_prover_database( Ok(()) } + +fn init_file_backed_proof_storage( + shell: &Shell, + ecosystem_config: &EcosystemConfig, + config: ProofStorageFileBacked, +) -> anyhow::Result { + let proof_store_dir = config.proof_store_dir; + let prover_path = get_link_to_prover(ecosystem_config); + + let proof_store_dir = prover_path.join(proof_store_dir).join("witness_inputs"); + + let cmd = Cmd::new(cmd!(shell, "mkdir -p {proof_store_dir}")); + cmd.run()?; + + let object_store_config = ObjectStoreConfig { + mode: ObjectStoreMode::FileBacked { + file_backed_base_path: proof_store_dir.into_os_string().into_string().unwrap(), + }, + max_retries: PROVER_STORE_MAX_RETRIES, + local_mirror_path: None, + }; + + Ok(object_store_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs index 2b771c8ad201..d9e443cdae0d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs @@ -14,7 +14,6 @@ mod init; mod init_bellman_cuda; mod run; mod setup_keys; -mod utils; #[derive(Subcommand, Debug)] pub enum ProverCommands { diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 
78116e40d6c7..8f72da03f3b3 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -2,13 +2,10 @@ use std::path::PathBuf; use anyhow::{anyhow, Context}; use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; -use config::EcosystemConfig; +use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use super::{ - args::run::{ProverComponent, ProverRunArgs}, - utils::get_link_to_prover, -}; +use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs index 09d9f76a47cf..ae0480e872dd 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs @@ -1,12 +1,10 @@ use anyhow::Ok; use common::{ - check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITES, - GPU_PREREQUISITES, + check_prerequisites, cmd::Cmd, logger, spinner::Spinner, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, }; -use config::EcosystemConfig; +use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use super::utils::get_link_to_prover; use crate::{ commands::prover::args::setup_keys::{Mode, Region, SetupKeysArgs}, messages::{MSG_GENERATING_SK_SPINNER, MSG_SK_GENERATED}, @@ -33,7 +31,7 @@ pub(crate) async fn run(args: SetupKeysArgs, shell: &Shell) -> anyhow::Result<() spinner.finish(); logger::outro(MSG_SK_GENERATED); } else { - check_prerequisites(shell, &GCLOUD_PREREQUISITES, false); + check_prerequisites(shell, &GCLOUD_PREREQUISITE, false); let link_to_setup_keys = get_link_to_prover(&ecosystem_config).join("data/keys"); let path_to_keys_buckets = diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs deleted file mode 100644 index 4dae70863dc9..000000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/utils.rs +++ /dev/null @@ -1,10 +0,0 @@ -use std::path::PathBuf; - -use config::EcosystemConfig; - -pub(crate) fn get_link_to_prover(config: &EcosystemConfig) -> PathBuf { - let link_to_code = config.link_to_code.clone(); - let mut link_to_prover = link_to_code.into_os_string(); - link_to_prover.push("/prover"); - link_to_prover.into() -} diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md index c3fac876ace6..865bd2f0d579 100644 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ b/zk_toolbox/crates/zk_supervisor/README.md @@ -32,7 +32,9 @@ This document contains the help content for the `zk_supervisor` command-line pro - [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt) - [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract) - [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier) -- [`zk_supervisor prover-version`↴](#zk_supervisor-prover-version) +- [`zk_supervisor prover info`↴](#zk_supervisor-prover-info) +- [`zk_supervisor prover insert-version`↴](#zk_supervisor-prover-insert-version) +- [`zk_supervisor prover insert-batch`↴](#zk_supervisor-prover-insert-batch) ## `zk_supervisor` @@ -348,11 +350,35 @@ Format code Possible values: `md`, `sol`, `js`, `ts`, `rs` -## 
`zk_supervisor prover-version` +## `zk_supervisor prover info` -Protocol version used by provers +Prints prover protocol version, snark wrapper and prover database URL -**Usage:** `zk_supervisor prover-version` +**Usage:** `zk_supervisor prover info` + +## `zk_supervisor prover insert-version` + +Inserts protocol version into prover database. + +**Usage:** `zk_supervisor prover insert-version [OPTIONS]` + +###### **Options:** + +- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. +- `--snark-wrapper ` — Snark wrapper hash. +- `--default` - use default values for protocol version and snark wrapper hash (the ones found in zksync-era). + +## `zk_supervisor prover insert-batch` + +Inserts batch into prover database. + +**Usage:** `zk_supervisor prover insert-batch` + +###### **Options:** + +- `--number ` — Number of the batch to insert. +- `--version ` — Protocol version in semantic format(`x.y.z`). Major version should be 0. +- `--default` - use default value for protocol version (the one found in zksync-era).
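Both `insert-*` subcommands resolve the prover database URL from the chain's secrets config and delegate to `prover_cli`, splitting the semantic version into its minor and patch components. A rough sketch of the equivalence (the database URL is a placeholder; the flag mapping follows the `insert_batch.rs` implementation below):

```
zk_supervisor prover insert-batch --number 42 --version 0.24.2
# roughly delegates to:
prover_cli <prover_db_url> insert-batch --version=24 --patch=2 --number=42
```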
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs index 5e32a8e5ae4e..f0262cecb959 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs @@ -20,14 +20,14 @@ pub async fn run(shell: &Shell, args: DatabaseCommonArgs) -> anyhow::Result<()> return Ok(()); } - let ecoseystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = EcosystemConfig::from_file(shell)?; logger::info(msg_database_info(MSG_DATABASE_RESET_GERUND)); let dals = get_dals(shell, &args.selected_dals, &args.urls)?; for dal in dals { logger::info(msg_database_loading(MSG_DATABASE_RESET_GERUND, &dal.path)); - reset_database(shell, ecoseystem_config.link_to_code.clone(), dal).await?; + reset_database(shell, ecosystem_config.link_to_code.clone(), dal).await?; } logger::outro(msg_database_success(MSG_DATABASE_RESET_PAST)); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index e45512d50d89..875f2982c959 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -4,6 +4,6 @@ pub mod database; pub mod fmt; pub mod lint; pub(crate) mod lint_utils; -pub mod prover_version; +pub mod prover; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs new file mode 100644 index 000000000000..e837bbe9eb86 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs @@ -0,0 +1,40 @@ +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct InsertBatchArgs { + #[clap(long)] + pub number: Option, + #[clap(long, default_value = "false")] + pub default: bool, + #[clap(long)] + pub version: Option, +} + +#[derive(Debug)] +pub struct InsertBatchArgsFinal { + pub number: u32, + pub version: String, +} + +impl InsertBatchArgs { + pub(crate) fn fill_values_with_prompts(self, era_version: String) -> InsertBatchArgsFinal { + let number = self.number.unwrap_or_else(|| { + common::Prompt::new("Enter the number of the batch to insert").ask() + }); + + if self.default { + return InsertBatchArgsFinal { + number, + version: era_version, + }; + } + + let version = self.version.unwrap_or_else(|| { + common::Prompt::new("Enter the version of the batch to insert") + .default(&era_version) + .ask() + }); + + InsertBatchArgsFinal { number, version } + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs new file mode 100644 index 000000000000..97e60fb38f8c --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs @@ -0,0 +1,49 @@ +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct InsertVersionArgs { + #[clap(long, default_value = "false")] + pub default: bool, + #[clap(long)] + pub version: Option, + #[clap(long)] + pub snark_wrapper: Option, +} + +#[derive(Debug)] +pub struct InsertVersionArgsFinal { + pub snark_wrapper: String, + pub version: String, +} + +impl InsertVersionArgs { + pub(crate) fn fill_values_with_prompts( + self, + era_version: String, + snark_wrapper: String, + ) -> InsertVersionArgsFinal { + if self.default { + return InsertVersionArgsFinal { + snark_wrapper, + version: era_version, + }; + } 
+ + let version = self.version.unwrap_or_else(|| { + common::Prompt::new("Enter the version of the protocol to insert") + .default(&era_version) + .ask() + }); + + let snark_wrapper = self.snark_wrapper.unwrap_or_else(|| { + common::Prompt::new("Enter the snark wrapper of the protocol to insert") + .default(&snark_wrapper) + .ask() + }); + + InsertVersionArgsFinal { + snark_wrapper, + version, + } + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs new file mode 100644 index 000000000000..0984546136c9 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs @@ -0,0 +1,2 @@ +pub mod insert_batch; +pub mod insert_version; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs new file mode 100644 index 000000000000..05964cf689fd --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs @@ -0,0 +1,95 @@ +use std::{ + fs, + path::{Path, PathBuf}, +}; + +use anyhow::Context as _; +use common::{config::global_config, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code; + let link_to_prover = link_to_code.join("prover"); + + let protocol_version = get_protocol_version(shell, &link_to_prover).await?; + let snark_wrapper = get_snark_wrapper(&link_to_prover).await?; + let prover_url = get_database_url(shell).await?; + + logger::info(format!( + " +=============================== \n +Current prover setup information: \n +Protocol version: {} \n +Snark wrapper: {} \n +Database URL: {}\n +===============================", + protocol_version, snark_wrapper, prover_url + )); + + Ok(()) +} + +pub(crate) async fn get_protocol_version( + shell: &Shell, + link_to_prover: &PathBuf, +) -> anyhow::Result { + shell.change_dir(link_to_prover); + let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?; + + Ok(protocol_version) +} + +pub(crate) async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result { + let path = link_to_prover.join("data/keys/commitments.json"); + let file = fs::File::open(path).expect("Could not find commitments file in zksync-era"); + let json: serde_json::Value = + serde_json::from_reader(file).expect("Could not parse commitments.json"); + + let snark_wrapper = json + .get("snark_wrapper") + .expect("Could not find snark_wrapper in commitments.json"); + + let mut snark_wrapper = snark_wrapper.to_string(); + snark_wrapper.pop(); + snark_wrapper.remove(0); + + Ok(snark_wrapper) +} + +pub(crate) async fn get_database_url(shell: &Shell) -> anyhow::Result { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem + .load_chain(global_config().chain_name.clone()) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + + let prover_url = chain_config + .get_secrets_config()? + .database + .context("Database secrets not found")? + .prover_url()? 
+ .expose_url() + .to_string(); + Ok(prover_url) +} + +pub fn parse_version(version: &str) -> anyhow::Result<(&str, &str)> { + let splitted: Vec<&str> = version.split(".").collect(); + + assert_eq!(splitted.len(), 3, "Invalid version format"); + assert_eq!(splitted[0], "0", "Invalid major version, expected 0"); + + splitted[1] + .parse::() + .context("Could not parse minor version")?; + splitted[2] + .parse::() + .context("Could not parse patch version")?; + + let minor = splitted[1]; + let patch = splitted[2]; + + Ok((minor, patch)) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs new file mode 100644 index 000000000000..2c4a1cf97513 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs @@ -0,0 +1,38 @@ +use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; +use config::{get_link_to_prover, EcosystemConfig}; +use xshell::{cmd, Shell}; + +use crate::commands::prover::{ + args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, + info, +}; + +pub async fn run(shell: &Shell, args: InsertBatchArgs) -> anyhow::Result<()> { + check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false); + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?; + let prover_url = info::get_database_url(shell).await?; + + let InsertBatchArgsFinal { number, version } = args.fill_values_with_prompts(version); + + let (minor, patch) = info::parse_version(&version)?; + + logger::info(format!( + "Inserting protocol version {}, batch number {} into the database", + version, number + )); + + let number = number.to_string(); + + let cmd = Cmd::new(cmd!( + shell, + "prover_cli {prover_url} insert-batch --version={minor} --patch={patch} --number={number}" + )); + cmd.run()?; + + logger::info("Done."); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs new file mode 100644 index 000000000000..ab28efca9446 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs @@ -0,0 +1,38 @@ +use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; +use config::{get_link_to_prover, EcosystemConfig}; +use xshell::{cmd, Shell}; + +use crate::commands::prover::{ + args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, + info, +}; + +pub async fn run(shell: &Shell, args: InsertVersionArgs) -> anyhow::Result<()> { + check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false); + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?; + let snark_wrapper = info::get_snark_wrapper(&get_link_to_prover(&ecosystem_config)).await?; + + let prover_url = info::get_database_url(shell).await?; + + let InsertVersionArgsFinal { + version, + snark_wrapper, + } = args.fill_values_with_prompts(version, snark_wrapper); + + let (minor, patch) = info::parse_version(&version)?; + + logger::info(format!( + "Inserting protocol version {}, snark wrapper {} into the database", + version, snark_wrapper + )); + + let cmd = Cmd::new(cmd!(shell, "prover_cli {prover_url} insert-version --version={minor} --patch={patch} --snark-wrapper={snark_wrapper}")); + cmd.run()?; + + logger::info("Done."); + + Ok(()) +} diff --git 
a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs new file mode 100644 index 000000000000..364f8fe93efc --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs @@ -0,0 +1,22 @@ +use clap::Subcommand; +use xshell::Shell; + +mod args; +pub mod info; +pub mod insert_batch; +pub mod insert_version; + +#[derive(Subcommand, Debug)] +pub enum ProverCommands { + Info, + InsertBatch(args::insert_batch::InsertBatchArgs), + InsertVersion(args::insert_version::InsertVersionArgs), +} + +pub async fn run(shell: &Shell, args: ProverCommands) -> anyhow::Result<()> { + match args { + ProverCommands::Info => info::run(shell).await, + ProverCommands::InsertBatch(args) => insert_batch::run(shell, args).await, + ProverCommands::InsertVersion(args) => insert_version::run(shell, args).await, + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs deleted file mode 100644 index 8740e7c873a9..000000000000 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover_version.rs +++ /dev/null @@ -1,40 +0,0 @@ -use std::{fs, path::Path}; - -use common::logger; -use config::EcosystemConfig; -use xshell::{cmd, Shell}; - -pub async fn run(shell: &Shell) -> anyhow::Result<()> { - let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code; - let link_to_prover = link_to_code.join("prover"); - - let protocol_version = get_protocol_version(shell, &link_to_prover).await?; - let snark_wrapper = get_snark_wrapper(&link_to_prover).await?; - - logger::info(format!( - "Current protocol version found in zksync-era: {}, snark_wrapper: {}", - protocol_version, snark_wrapper - )); - - Ok(()) -} - -async fn get_protocol_version(shell: &Shell, link_to_prover: &Path) -> anyhow::Result { - shell.change_dir(link_to_prover); - let protocol_version = cmd!(shell, "cargo run --release --bin prover_version").read()?; - - Ok(protocol_version) -} - -async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result { - let path = link_to_prover.join("data/keys/commitments.json"); - let file = fs::File::open(path).expect("Could not find commitments file in zksync-era"); - let json: serde_json::Value = - serde_json::from_reader(file).expect("Could not parse commitments.json"); - - let snark_wrapper = json - .get("snark_wrapper") - .expect("Could not find snark_wrapper in commitments.json"); - - Ok(snark_wrapper.to_string()) -} diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index a8722787b5ff..32aefa7fcad9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,6 +1,6 @@ use clap::{Parser, Subcommand}; use commands::{ - contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, + contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, prover::ProverCommands, snapshot::SnapshotCommands, test::TestCommands, }; use common::{ @@ -49,8 +49,8 @@ enum SupervisorSubcommands { Fmt(FmtArgs), #[command(hide = true)] Markdown, - #[command(about = MSG_PROVER_VERSION_ABOUT)] - ProverVersion, + #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] + Prover(ProverCommands), #[command(about = MSG_CONTRACTS_ABOUT)] Contracts(ContractsArgs), } @@ -109,7 +109,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { } SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, 
SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, - SupervisorSubcommands::ProverVersion => commands::prover_version::run(shell).await?, + SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, } Ok(()) From 89fcb3a4a29c9831141234a1b4ca6b1d4df48b98 Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 11 Sep 2024 12:32:12 +0200 Subject: [PATCH 055/116] fix(zk-toolbox): Make token multiplier optional (#2843) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- .../crates/config/src/wallet_creation.rs | 6 ++- zk_toolbox/crates/config/src/wallets.rs | 6 +-- .../zk_inception/src/commands/chain/init.rs | 37 ++++++++++--------- .../chain/set_token_multiplier_setter.rs | 3 +- .../crates/zk_inception/src/messages.rs | 2 + 5 files changed, 32 insertions(+), 22 deletions(-) diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zk_toolbox/crates/config/src/wallet_creation.rs index a27d55f6f46b..6cfdf08a36d3 100644 --- a/zk_toolbox/crates/config/src/wallet_creation.rs +++ b/zk_toolbox/crates/config/src/wallet_creation.rs @@ -58,6 +58,10 @@ pub fn create_localhost_wallets( blob_operator: Wallet::from_mnemonic(ð_mnemonic.test_mnemonic, &base_path, 2)?, fee_account: Wallet::from_mnemonic(ð_mnemonic.test_mnemonic, &base_path, 3)?, governor: Wallet::from_mnemonic(ð_mnemonic.test_mnemonic, &base_path, 4)?, - token_multiplier_setter: Wallet::from_mnemonic(ð_mnemonic.test_mnemonic, &base_path, 5)?, + token_multiplier_setter: Some(Wallet::from_mnemonic( + ð_mnemonic.test_mnemonic, + &base_path, + 5, + )?), }) } diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zk_toolbox/crates/config/src/wallets.rs index a2e5be87440a..9c87453954ec 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zk_toolbox/crates/config/src/wallets.rs @@ -15,7 +15,7 @@ pub struct WalletsConfig { pub blob_operator: Wallet, pub fee_account: Wallet, pub governor: Wallet, - pub token_multiplier_setter: Wallet, + pub token_multiplier_setter: Option, } impl WalletsConfig { @@ -27,7 +27,7 @@ impl WalletsConfig { blob_operator: Wallet::random(rng), fee_account: Wallet::random(rng), governor: Wallet::random(rng), - token_multiplier_setter: Wallet::random(rng), + token_multiplier_setter: Some(Wallet::random(rng)), } } @@ -39,7 +39,7 @@ impl WalletsConfig { blob_operator: Wallet::empty(), fee_account: Wallet::empty(), governor: Wallet::empty(), - token_multiplier_setter: Wallet::empty(), + token_multiplier_setter: Some(Wallet::empty()), } } pub fn deployer_private_key(&self) -> Option { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 793fbbf31aee..a5f57981d583 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -36,7 +36,7 @@ use crate::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR, MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, - 
MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, + MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -112,22 +112,25 @@ pub async fn init( .await?; spinner.finish(); - let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); - set_token_multiplier_setter( - shell, - ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), - contracts_config.l1.chain_admin_addr, - ecosystem_config - .get_wallets() - .unwrap() - .token_multiplier_setter - .address, - &init_args.forge_args.clone(), - init_args.l1_rpc_url.clone(), - ) - .await?; - spinner.finish(); + if chain_config.base_token != BaseToken::eth() { + let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); + set_token_multiplier_setter( + shell, + ecosystem_config, + chain_config.get_wallets_config()?.governor_private_key(), + contracts_config.l1.chain_admin_addr, + chain_config + .get_wallets_config() + .unwrap() + .token_multiplier_setter + .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? + .address, + &init_args.forge_args.clone(), + init_args.l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + } deploy_l2_contracts::deploy_l2_contracts( shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index 0ab0d451f1f7..f92391c22f47 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -15,7 +15,7 @@ use crate::{ messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED, MSG_TOKEN_MULTIPLIER_SETTER_UPDATED_TO, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, - MSG_WALLETS_CONFIG_MUST_BE_PRESENT, + MSG_WALLETS_CONFIG_MUST_BE_PRESENT, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -47,6 +47,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .get_wallets() .context(MSG_WALLETS_CONFIG_MUST_BE_PRESENT)? .token_multiplier_setter + .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? .address; let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 6f94a7b102a4..827aa03d7ba8 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -157,6 +157,8 @@ pub(super) const MSG_CHAIN_ID_VALIDATOR_ERR: &str = "Invalid chain id"; pub(super) const MSG_BASE_TOKEN_ADDRESS_VALIDATOR_ERR: &str = "Invalid base token address"; pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; +pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = + "Token Multiplier Setter not found. 
Specify it in a wallet config"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; From ffb38380f132f15095ee710181512aef05b9ed64 Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Wed, 11 Sep 2024 20:33:40 +0200 Subject: [PATCH 056/116] feat: Smaller zk_environment image (#1920) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * Reduced uncompressed size from 7.5GB to 4.81 GB. * Changes to make zk_environment image smaller * compile solc with release mode * remove unnecessary components from google cli * build things first, and then only copy the artifacts. ## Why ❔ * This image is used on ALL of our CI builds - so reducing its size will speed up the CIs. --- docker/zk-environment/Dockerfile | 76 ++++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 24 deletions(-) diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 375384bf7fca..53e532653111 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -25,10 +25,34 @@ RUN git submodule update --init --recursive # Build Solidity WORKDIR /solidity/build -RUN cmake .. -RUN make +# The default compilation is Release with Debug symbols, which is quite large. +RUN cmake .. -DCMAKE_BUILD_TYPE="Release" +RUN make -j -FROM debian:bookworm as rust-lightweight +# Rust binaries - with a separate builder. +FROM rust:slim-bookworm as rust-builder + +ARG ARCH=amd64 +RUN apt-get update && apt-get install -y \ + libssl-dev \ + pkg-config \ + libclang-15-dev \ + g++ \ + cmake \ + git + +RUN cargo install --version=0.8.0 sqlx-cli +RUN cargo install cargo-nextest +RUN cargo install cargo-spellcheck +RUN cargo install sccache + +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + +# Main builder. +FROM debian:bookworm as rust-lightweight-base ARG ARCH=amd64 @@ -69,7 +93,7 @@ RUN apt-get update && \ lldb-15 \ lld-15 \ liburing-dev \ - libclang-dev + libclang-15-dev # Install Docker RUN apt-get update && \ @@ -97,27 +121,28 @@ ENV RUSTUP_HOME=/usr/local/rustup \ PATH=/usr/local/cargo/bin:$PATH # Install gloud for GCR/GAR login +# Google was super lazy, and their package is around 1 GB. 
+# So we trim it a little bit based on info from `https://github.com/GoogleCloudPlatform/gsutil/issues/1732` ENV GCLOUD_VERSION=451.0.1 RUN echo "deb [arch=${ARCH}] http://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \ wget -c -O - https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - && \ apt-get update -y && apt-get install google-cloud-cli=${GCLOUD_VERSION}-0 --no-install-recommends -y && \ gcloud config set core/disable_usage_reporting true && \ gcloud config set component_manager/disable_update_check true && \ - gcloud config set metrics/environment github_docker_image - -RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y && \ - rustup default stable - -RUN cargo install --version=0.8.0 sqlx-cli -RUN cargo install cargo-nextest - -# Installing foundry-zksync from git is failing, we will build it from sources -# Install foundry -RUN git clone https://github.com/matter-labs/foundry-zksync -RUN cd foundry-zksync && cargo build --release --bins -RUN mv ./foundry-zksync/target/release/forge /usr/local/bin/ -RUN mv ./foundry-zksync/target/release/cast /usr/local/bin/ - + gcloud config set metrics/environment github_docker_image && \ + rm -rf $(find /usr/lib/google-cloud-sdk/ -regex ".*/__pycache__") && \ + rm -rf /usr/lib/google-cloud-sdk/bin/anthoscli && \ + rm -rf /usr/lib/google-cloud-sdk/platform/bundledpythonunix && \ + rm -rf /usr/lib/google-cloud-sdk/data/gcloud.json + +COPY --from=rust-builder /usr/local/cargo/bin/sqlx \ + /usr/local/cargo/bin/cargo-sqlx \ + /usr/local/cargo/bin/cargo-nextest \ + /usr/local/cargo/bin/cargo-spellcheck \ + /usr/local/cargo/bin/sccache \ + /usr/local/cargo/bin/forge \ + /usr/local/cargo/bin/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. COPY --from=solidity-builder /solidity/build/solc/solc /usr/bin/ @@ -133,7 +158,7 @@ RUN apt-get remove valgrind -y # We need valgrind 3.20, which is unavailable in repos or ppa, so we will build it from source RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ tar -xf valgrind-3.20.0.tar.bz2 && \ - cd valgrind-3.20.0 && ./configure && make && make install && \ + cd valgrind-3.20.0 && ./configure && make -j && make install && \ cd ../ && rm -rf valgrind-3.20.0.tar.bz2 && rm -rf valgrind-3.20.0 @@ -141,10 +166,13 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ ENV ZKSYNC_HOME=/usr/src/zksync ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" ENV CI=1 -RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache -FROM rust-lightweight as rust-lightweight-nightly +# If target is 'main' - then install default rust. +FROM rust-lightweight-base as rust-lightweight +RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y + -RUN rustup install nightly-2024-08-01 && \ - rustup default nightly-2024-08-01 +# If target is nightly - then install only nightly rust. 
+FROM rust-lightweight-base as rust-lightweight-nightly +RUN wget -c -O - https://sh.rustup.rs | bash -s -- -y --default-toolchain nightly-2024-08-01 \ No newline at end of file From 3b5e4a69d7dbc43ea3460f4c7c57cf3ef6847b11 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 12 Sep 2024 10:18:04 +0300 Subject: [PATCH 057/116] fix(zk_toolbox): secrets path, artifacts path (#2850) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix getting chain config in zk_supervisor prover Fix artifacts path when initializing. Setup data path for provers ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../zk_inception/src/commands/prover/init.rs | 4 ++-- .../zk_inception/src/commands/prover/run.rs | 15 +++++++++++++- .../zk_supervisor/src/commands/prover/info.rs | 20 +++++++++---------- .../src/commands/prover/insert_batch.rs | 18 ++++++++++++----- .../src/commands/prover/insert_version.rs | 18 ++++++++++++----- 5 files changed, 52 insertions(+), 23 deletions(-) diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index 1d92357635c5..20e682745870 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -163,7 +163,7 @@ fn init_file_backed_proof_storage( ecosystem_config: &EcosystemConfig, config: ProofStorageFileBacked, ) -> anyhow::Result { - let proof_store_dir = config.proof_store_dir; + let proof_store_dir = config.proof_store_dir.clone(); let prover_path = get_link_to_prover(ecosystem_config); let proof_store_dir = prover_path.join(proof_store_dir).join("witness_inputs"); @@ -173,7 +173,7 @@ fn init_file_backed_proof_storage( let object_store_config = ObjectStoreConfig { mode: ObjectStoreMode::FileBacked { - file_backed_base_path: proof_store_dir.into_os_string().into_string().unwrap(), + file_backed_base_path: config.proof_store_dir, }, max_retries: PROVER_STORE_MAX_RETRIES, local_mirror_path: None, diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 8f72da03f3b3..5f4bf2f4a671 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -2,7 +2,7 @@ use std::path::PathBuf; use anyhow::{anyhow, Context}; use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; -use config::{get_link_to_prover, EcosystemConfig}; +use config::{get_link_to_prover, ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; @@ -69,6 +69,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() if in_docker { let path_to_configs = chain.configs.clone(); let path_to_prover = get_link_to_prover(&ecosystem_config); + update_setup_data_path(&chain, "prover/data/keys".to_string())?; run_dockerized_component( shell, component.image_name(), @@ -80,6 +81,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() &path_to_prover, )? 
} else { + update_setup_data_path(&chain, "data/keys".to_string())?; run_binary_component( shell, component.binary_name(), @@ -132,3 +134,14 @@ fn run_binary_component( cmd = cmd.with_force_run(); cmd.run().context(error) } + +fn update_setup_data_path(chain: &ChainConfig, path: String) -> anyhow::Result<()> { + let mut general_config = chain.get_general_config()?; + general_config + .prover_config + .as_mut() + .expect("Prover config not found") + .setup_data_path = path; + chain.save_general_config(&general_config)?; + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs index 05964cf689fd..6a7d7ddeda8a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs @@ -5,18 +5,23 @@ use std::{ use anyhow::Context as _; use common::{config::global_config, logger}; -use config::EcosystemConfig; +use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; pub async fn run(shell: &Shell) -> anyhow::Result<()> { - let link_to_code = EcosystemConfig::from_file(shell)?.link_to_code; + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .expect(MSG_CHAIN_NOT_FOUND_ERR); + + let link_to_code = ecosystem_config.link_to_code; let link_to_prover = link_to_code.join("prover"); let protocol_version = get_protocol_version(shell, &link_to_prover).await?; let snark_wrapper = get_snark_wrapper(&link_to_prover).await?; - let prover_url = get_database_url(shell).await?; + let prover_url = get_database_url(&chain_config).await?; logger::info(format!( " @@ -59,13 +64,8 @@ pub(crate) async fn get_snark_wrapper(link_to_prover: &Path) -> anyhow::Result anyhow::Result { - let ecosystem = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem - .load_chain(global_config().chain_name.clone()) - .context(MSG_CHAIN_NOT_FOUND_ERR)?; - - let prover_url = chain_config +pub(crate) async fn get_database_url(chain: &ChainConfig) -> anyhow::Result { + let prover_url = chain .get_secrets_config()? .database .context("Database secrets not found")? 
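For reference, the `update_setup_data_path` helper added to `run.rs` above rewrites the prover section of the chain's general config before a component starts. A minimal sketch of the affected field (the surrounding YAML structure is an assumption; only the `setup_data_path` values come from this patch):

```yaml
# Hypothetical excerpt of the chain's general.yaml after `prover run`:
prover:
  setup_data_path: prover/data/keys # dockerized runs; plain binary runs get "data/keys"
```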
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs index 2c4a1cf97513..b1c02c9a9fea 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs @@ -1,19 +1,27 @@ -use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; +use common::{ + check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE, +}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::commands::prover::{ - args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, - info, +use crate::{ + commands::prover::{ + args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, + info, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, }; pub async fn run(shell: &Shell, args: InsertBatchArgs) -> anyhow::Result<()> { check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false); let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .expect(MSG_CHAIN_NOT_FOUND_ERR); let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?; - let prover_url = info::get_database_url(shell).await?; + let prover_url = info::get_database_url(&chain_config).await?; let InsertBatchArgsFinal { number, version } = args.fill_values_with_prompts(version); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs index ab28efca9446..16bbdf13df4f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs @@ -1,21 +1,29 @@ -use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; +use common::{ + check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE, +}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::commands::prover::{ - args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, - info, +use crate::{ + commands::prover::{ + args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, + info, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, }; pub async fn run(shell: &Shell, args: InsertVersionArgs) -> anyhow::Result<()> { check_prerequisites(shell, &PROVER_CLI_PREREQUISITE, false); let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + .expect(MSG_CHAIN_NOT_FOUND_ERR); let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?; let snark_wrapper = info::get_snark_wrapper(&get_link_to_prover(&ecosystem_config)).await?; - let prover_url = info::get_database_url(shell).await?; + let prover_url = info::get_database_url(&chain_config).await?; let InsertVersionArgsFinal { version, From 527b5ab8052bfb5e7ff7c3a54747b1470c69fafa Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:37:57 +0300 Subject: [PATCH 058/116] ci: fix using cargo nextest (#2855) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ `cargo install --list` doesn't work properly in CI because we copy only binaries to zk-environment also increase reth block-time and fixes 
upgrade-test which improves tests stability ## Why ❔ we rely on cargo-nextest in tests ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci.yml | 1 + core/tests/upgrade-test/tests/upgrade.test.ts | 14 ++++++++----- docker-compose-cpu-runner.yml | 2 +- docker-compose-gpu-runner-cuda-12-0.yml | 2 +- docker-compose-gpu-runner.yml | 2 +- docker-compose.yml | 2 +- zk_toolbox/Cargo.lock | 1 - .../zk_supervisor/src/commands/test/rust.rs | 20 +++---------------- .../crates/zk_supervisor/src/messages.rs | 1 - 9 files changed, 17 insertions(+), 28 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 53c169114915..e05b84cda971 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,6 +76,7 @@ jobs: - 'etc/**' - 'contracts/**' - 'infrastructure/zk/**' + - 'docker/zk-environment/**' - '!**/*.md' - '!**/*.MD' diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 0f70e751b844..2e223b9d7441 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -89,25 +89,29 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - let walletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); - adminGovWallet = new ethers.Wallet(walletConfig.governor.private_key, alice._providerL1()); + adminGovWallet = new ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); - walletConfig = loadConfig({ + const ecosystemWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, configsFolder: '../../configs/', config: 'wallets.yaml' }); - ecosystemGovWallet = new ethers.Wallet(walletConfig.governor.private_key, alice._providerL1()); + if (ecosystemWalletConfig.governor.private_key == chainWalletConfig.governor.private_key) { + ecosystemGovWallet = adminGovWallet; + } else { + ecosystemGovWallet = new ethers.Wallet(ecosystemWalletConfig.governor.private_key, alice._providerL1()); + } } else { let govMnemonic = ethers.Mnemonic.fromPhrase( require('../../../../etc/test_config/constant/eth.json').mnemonic ); let govWalletHD = ethers.HDNodeWallet.fromMnemonic(govMnemonic, "m/44'/60'/0'/0/1"); adminGovWallet = new ethers.Wallet(govWalletHD.privateKey, alice._providerL1()); - ecosystemGovWallet = new ethers.Wallet(govWalletHD.privateKey, alice._providerL1()); + ecosystemGovWallet = adminGovWallet; } logs = fs.createWriteStream('upgrade.log', { flags: 'a' }); diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index beb54f3ade98..e0f751130eb0 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml 
b/docker-compose-gpu-runner-cuda-12-0.yml index 35a0faeb9620..f2089446a41d 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index f95ae0d5f544..35c6c3778f22 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a4..7e1b52f83347 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,7 +13,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config postgres: diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 75859021979f..7c53e2747daf 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -6539,7 +6539,6 @@ dependencies = [ "bigdecimal", "futures", "hex", - "itertools 0.10.5", "num", "once_cell", "reqwest 0.12.5", diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index ad1318cfa768..c42f95e8e3b5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -12,7 +12,7 @@ use crate::{ dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ - MSG_CARGO_NEXTEST_MISSING_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, + MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, }, }; @@ -61,13 +61,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { let _dir_guard = shell.push_dir(&link_to_code); - let cmd = if nextest_is_installed(shell)? 
{ - logger::info(MSG_USING_CARGO_NEXTEST); - cmd!(shell, "cargo nextest run --release") - } else { - logger::error(MSG_CARGO_NEXTEST_MISSING_ERR); - cmd!(shell, "cargo test --release") - }; + logger::info(MSG_USING_CARGO_NEXTEST); + let cmd = cmd!(shell, "cargo nextest run --release"); let cmd = if let Some(options) = args.options { Cmd::new(cmd.args(options.split_whitespace())).with_force_run() @@ -84,15 +79,6 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { Ok(()) } -fn nextest_is_installed(shell: &Shell) -> anyhow::Result { - let out = String::from_utf8( - Cmd::new(cmd!(shell, "cargo install --list")) - .run_with_output()? - .stdout, - )?; - Ok(out.contains("cargo-nextest")) -} - async fn reset_test_databases( shell: &Shell, link_to_code: &Path, diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index d64e87cd0eb4..89cf8c1d9b60 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -99,7 +99,6 @@ pub(super) const MSG_TESTS_RECOVERY_SNAPSHOT_HELP: &str = "Run recovery from a snapshot instead of genesis"; pub(super) const MSG_UNIT_TESTS_RUN_SUCCESS: &str = "Unit tests ran successfully"; pub(super) const MSG_USING_CARGO_NEXTEST: &str = "Using cargo-nextest for running tests"; -pub(super) const MSG_CARGO_NEXTEST_MISSING_ERR: &str = "cargo-nextest is missing, please run 'cargo install cargo-nextest'. Falling back to 'cargo test'"; pub(super) const MSG_L1_CONTRACTS_ABOUT: &str = "Run L1 contracts tests"; pub(super) const MSG_L1_CONTRACTS_TEST_SUCCESS: &str = "L1 contracts tests ran successfully"; pub(super) const MSG_PROVER_TEST_ABOUT: &str = "Run prover tests"; From 9218612fdb2b63c20841e2e2e5a45bbd23c01fbc Mon Sep 17 00:00:00 2001 From: Dima Zhornyk <55756184+dimazhornyk@users.noreply.github.com> Date: Thu, 12 Sep 2024 11:39:49 +0200 Subject: [PATCH 059/116] feat: add da clients (#2743) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR changes the approach to managing 3rd party DA clients. It was assumed before that they will be stored in a separate repository (hyperchain-da), but to simplify the processes and solve a recursive dependency problem, we decided to manage those within `zksync-era`. The config now defines which DA client will be used, for proto-based configuration it requires adding these lines to general.yaml: ``` da_client: avail: api_node_url: wss://turing-rpc.avail.so/ws bridge_api_url: undefined seed: SEED_PHRASE app_id: 82 timeout: 3 max_retries: 5 ``` for env-based: ``` DA_CLIENT="Avail" DA_API_NODE_URL="localhost:12345" DA_BRIDGE_API_URL="localhost:54321" DA_SEED="SEED_PHRASE" DA_APP_ID=1 DA_TIMEOUT=2 DA_MAX_RETRIES=3 ``` If no config is provided - the default behavior is to use NoDA client (same as now). The `da_client` config might be merged with `da_dispatcher` at some point as the second depends on the first one, so their separation does not make much sense (apart from simplification of the configs). But I'd prefer to do it as a separate PR in case we decide to merge them. The client was reimplemented using only lightweight libraries from crates.io, so it doesn't have any visible impact on build time. ## Why ❔ To enable seamless integration with 3rd party DA clients in `zksync-era`. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. 
- [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 1545 +++++++++++++++-- Cargo.toml | 13 +- core/bin/zksync_server/Cargo.toml | 2 +- core/bin/zksync_server/src/main.rs | 7 +- core/bin/zksync_server/src/node_builder.rs | 40 +- .../lib/config/src/configs/da_client/avail.rs | 11 + core/lib/config/src/configs/da_client/mod.rs | 20 + core/lib/config/src/configs/general.rs | 2 + core/lib/config/src/configs/mod.rs | 2 + core/lib/config/src/lib.rs | 7 +- core/lib/config/src/testonly.rs | 24 +- core/lib/da_client/src/types.rs | 6 + core/lib/default_da_clients/README.md | 11 - core/lib/default_da_clients/src/no_da/mod.rs | 2 - .../src/object_store/config.rs | 12 - .../src/object_store/mod.rs | 4 - .../src/object_store/types.rs | 38 - core/lib/env_config/src/da_client.rs | 115 ++ core/lib/env_config/src/lib.rs | 2 + core/lib/protobuf_config/src/da_client.rs | 61 + core/lib/protobuf_config/src/general.rs | 2 + core/lib/protobuf_config/src/lib.rs | 1 + .../src/proto/config/da_client.proto | 22 + .../src/proto/config/general.proto | 2 + core/lib/protobuf_config/src/tests.rs | 1 + .../src/temp_config_store/mod.rs | 7 +- .../da_clients}/Cargo.toml | 20 +- core/node/da_clients/README.md | 10 + core/node/da_clients/src/avail/client.rs | 85 + core/node/da_clients/src/avail/mod.rs | 4 + core/node/da_clients/src/avail/sdk.rs | 371 ++++ .../da_clients}/src/lib.rs | 1 + .../da_clients/src/no_da.rs} | 0 .../da_clients/src/object_store.rs} | 88 +- .../src/test_data/l1_batch_123_pubdata.gzip | Bin 0 -> 2511 bytes core/node/node_framework/Cargo.toml | 1 + .../layers/da_clients/avail.rs | 45 + .../implementations/layers/da_clients/mod.rs | 3 + .../layers/da_clients/no_da.rs} | 7 +- .../layers/da_clients/object_store.rs} | 7 +- .../src/implementations/layers/mod.rs | 1 + deny.toml | 2 + 42 files changed, 2352 insertions(+), 252 deletions(-) create mode 100644 core/lib/config/src/configs/da_client/avail.rs create mode 100644 core/lib/config/src/configs/da_client/mod.rs delete mode 100644 core/lib/default_da_clients/README.md delete mode 100644 core/lib/default_da_clients/src/no_da/mod.rs delete mode 100644 core/lib/default_da_clients/src/object_store/config.rs delete mode 100644 core/lib/default_da_clients/src/object_store/mod.rs delete mode 100644 core/lib/default_da_clients/src/object_store/types.rs create mode 100644 core/lib/env_config/src/da_client.rs create mode 100644 core/lib/protobuf_config/src/da_client.rs create mode 100644 core/lib/protobuf_config/src/proto/config/da_client.proto rename core/{lib/default_da_clients => node/da_clients}/Cargo.toml (51%) create mode 100644 core/node/da_clients/README.md create mode 100644 core/node/da_clients/src/avail/client.rs create mode 100644 core/node/da_clients/src/avail/mod.rs create mode 100644 core/node/da_clients/src/avail/sdk.rs rename core/{lib/default_da_clients => node/da_clients}/src/lib.rs (71%) rename core/{lib/default_da_clients/src/no_da/client.rs => node/da_clients/src/no_da.rs} (100%) rename core/{lib/default_da_clients/src/object_store/client.rs => node/da_clients/src/object_store.rs} (51%) create mode 100644 core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip create mode 100644 core/node/node_framework/src/implementations/layers/da_clients/avail.rs create mode 100644 core/node/node_framework/src/implementations/layers/da_clients/mod.rs rename core/{lib/default_da_clients/src/no_da/wiring_layer.rs => 
node/node_framework/src/implementations/layers/da_clients/no_da.rs} (90%) rename core/{lib/default_da_clients/src/object_store/wiring_layer.rs => node/node_framework/src/implementations/layers/da_clients/object_store.rs} (91%) diff --git a/Cargo.lock b/Cargo.lock index b98d343564b8..8f8d588c8fcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -239,6 +239,121 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "async-channel" +version = "2.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89b47800b0be77592da0afd425cc03468052844aff33b84e33cc696f64e77b6a" +dependencies = [ + "concurrent-queue", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + +[[package]] +name = "async-executor" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "30ca9a001c1e8ba5149f91a74362376cc6bc5b919d92d988668657bd570bdcec" +dependencies = [ + "async-task", + "concurrent-queue", + "fastrand", + "futures-lite", + "slab", +] + +[[package]] +name = "async-fs" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebcd09b382f40fcd159c2d695175b2ae620ffa5f3bd6f664131efff4e8b9e04a" +dependencies = [ + "async-lock", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-io" +version = "2.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" +dependencies = [ + "async-lock", + "cfg-if 1.0.0", + "concurrent-queue", + "futures-io", + "futures-lite", + "parking", + "polling", + "rustix", + "slab", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-lock" +version = "3.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff6e472cdea888a4bd64f342f09b3f50e1886d32afe8df3d663c01140b811b18" +dependencies = [ + "event-listener 5.3.1", + "event-listener-strategy", + "pin-project-lite", +] + +[[package]] +name = "async-net" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b948000fad4873c1c9339d60f2623323a0cfd3816e5181033c6a5cb68b2accf7" +dependencies = [ + "async-io", + "blocking", + "futures-lite", +] + +[[package]] +name = "async-process" +version = "2.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" +dependencies = [ + "async-channel", + "async-io", + "async-lock", + "async-signal", + "async-task", + "blocking", + "cfg-if 1.0.0", + "event-listener 5.3.1", + "futures-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + +[[package]] +name = "async-signal" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "637e00349800c0bdf8bfc21ebbc0b6524abea702b0da4168ac00d070d0c0b9f3" +dependencies = [ + "async-io", + "async-lock", + "atomic-waker", + "cfg-if 1.0.0", + "futures-core", + "futures-io", + "rustix", + "signal-hook-registry", + "slab", + "windows-sys 0.59.0", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -261,6 +376,12 @@ dependencies = [ "syn 2.0.72", ] +[[package]] +name = "async-task" +version = "4.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" + [[package]] name = "async-trait" version = "0.1.74" 
@@ -281,6 +402,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "atomic-take" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8ab6b55fe97976e46f91ddbed8d147d966475dc29b2032757ba47e02376fbc3" + [[package]] name = "atomic-waker" version = "1.1.2" @@ -426,6 +553,12 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" +[[package]] +name = "base58" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6107fe1be6682a68940da878d9e9f5e90ca5745b3dec9fd1bb393c8777d4f581" + [[package]] name = "base64" version = "0.13.1" @@ -481,7 +614,7 @@ dependencies = [ "byteorder", "cfg-if 1.0.0", "crossbeam 0.7.3", - "futures 0.3.28", + "futures 0.3.30", "hex", "lazy_static", "num_cpus", @@ -558,6 +691,17 @@ dependencies = [ "which", ] +[[package]] +name = "bip39" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93f2635620bf0b9d4576eb7bb9a38a55df78bd1205d26fa994b25911a69f212f" +dependencies = [ + "bitcoin_hashes", + "serde", + "unicode-normalization", +] + [[package]] name = "bit-vec" version = "0.6.3" @@ -567,6 +711,12 @@ dependencies = [ "serde", ] +[[package]] +name = "bitcoin_hashes" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90064b8dee6815a6470d60bad07bbbaee885c0e12d04177138fa3291a01b7bc4" + [[package]] name = "bitflags" version = "1.3.2" @@ -623,6 +773,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq 0.1.5", +] + [[package]] name = "blake2-rfc_bellman_edition" version = "0.0.1" @@ -631,7 +791,7 @@ checksum = "fdc60350286c7c3db13b98e91dbe5c8b6830a6821bc20af5b0c310ce94d74915" dependencies = [ "arrayvec 0.4.12", "byteorder", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -643,6 +803,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "blake2b_simd" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" +dependencies = [ + "arrayref", + "arrayvec 0.7.4", + "constant_time_eq 0.3.1", +] + [[package]] name = "blake2s_const" version = "0.7.0" @@ -651,7 +822,7 @@ checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -662,7 +833,7 @@ checksum = "9e461a7034e85b211a4acb57ee2e6730b32912b06c08cc242243c39fc21ae6a2" dependencies = [ "arrayref", "arrayvec 0.5.2", - "constant_time_eq", + "constant_time_eq 0.1.5", ] [[package]] @@ -709,6 +880,19 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "blocking" +version = "1.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" +dependencies = [ + "async-channel", + "async-task", + "futures-io", + "futures-lite", + "piper", +] + [[package]] name = "blst" version = "0.3.13" @@ -731,7 +915,7 @@ dependencies = [ "bincode", "blake2 0.10.6", "const_format", - "convert_case", + "convert_case 0.6.0", "crossbeam 0.8.4", "crypto-bigint 0.5.3", 
"cs_derive", @@ -776,6 +960,15 @@ dependencies = [ "syn_derive", ] +[[package]] +name = "bs58" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" +dependencies = [ + "tinyvec", +] + [[package]] name = "build_html" version = "2.5.0" @@ -1305,6 +1498,18 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "constant_time_eq" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" + +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "convert_case" version = "0.6.0" @@ -1652,8 +1857,28 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b750cb3417fd1b327431a470f388520309479ab0bf5e323505daf0290cd3850" +dependencies = [ + "darling_core 0.14.4", + "darling_macro 0.14.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1670,17 +1895,67 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "109c1ca6e6b7f82cc233a97004ea8ed7ca123a9af07a8230878fcfda9b158bf0" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.86", + "quote 1.0.36", + "strsim 0.10.0", + "syn 1.0.109", +] + +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.86", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.72", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.14.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" +dependencies = [ + "darling_core 0.14.4", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "dashmap" version = "5.5.3" @@ -1746,6 +2021,19 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "derive_more" 
+version = "0.99.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" +dependencies = [ + "convert_case 0.4.0", + "proc-macro2 1.0.86", + "quote 1.0.36", + "rustc_version", + "syn 2.0.72", +] + [[package]] name = "derive_more" version = "1.0.0-beta.6" @@ -1800,6 +2088,12 @@ version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "downcast-rs" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2" + [[package]] name = "dtoa" version = "1.0.9" @@ -1862,6 +2156,21 @@ dependencies = [ "zeroize", ] +[[package]] +name = "ed25519-zebra" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" +dependencies = [ + "curve25519-dalek", + "ed25519", + "hashbrown 0.14.5", + "hex", + "rand_core 0.6.4", + "sha2 0.10.8", + "zeroize", +] + [[package]] name = "either" version = "1.9.0" @@ -2063,6 +2372,16 @@ dependencies = [ "uint", ] +[[package]] +name = "event-listener" +version = "4.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" +dependencies = [ + "concurrent-queue", + "pin-project-lite", +] + [[package]] name = "event-listener" version = "5.3.1" @@ -2074,6 +2393,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener 5.3.1", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.0.1" @@ -2220,9 +2549,9 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "form_urlencoded" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a62bc1cf6f830c2ec14a513a9fb124d0a213a629668a4186f329db21fe045652" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" dependencies = [ "percent-encoding", ] @@ -2237,6 +2566,29 @@ dependencies = [ "num", ] +[[package]] +name = "frame-metadata" +version = "15.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" +dependencies = [ + "cfg-if 1.0.0", + "parity-scale-codec", + "scale-info", +] + +[[package]] +name = "frame-metadata" +version = "16.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" +dependencies = [ + "cfg-if 1.0.0", + "parity-scale-codec", + "scale-info", + "serde", +] + [[package]] name = "franklin-crypto" version = "0.1.0" @@ -2294,9 +2646,9 @@ checksum = "3a471a38ef8ed83cd6e40aa59c1ffe17db6855c18e3604d9c4ed8c08ebc28678" [[package]] name = "futures" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23342abe12aba583913b2e62f22225ff9c950774065e4bfb61a19cd9770fec40" +checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" dependencies = [ "futures-channel", "futures-core", @@ 
-2309,9 +2661,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955518d47e09b25bbebc7a18df10b81f0c766eaf4c4f1cccef2fca5f2a4fb5f2" +checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" dependencies = [ "futures-core", "futures-sink", @@ -2319,15 +2671,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bca583b7e26f571124fe5b7561d49cb2868d79116cfa0eefce955557c6fee8c" +checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" [[package]] name = "futures-executor" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccecee823288125bd88b4d7f565c9e58e41858e47ab72e8ea2d64e93624386e0" +checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" dependencies = [ "futures-core", "futures-task", @@ -2348,15 +2700,28 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fff74096e71ed47f8e023204cfd0aa1289cd54ae5430a9523be060cdb849964" +checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" + +[[package]] +name = "futures-lite" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" +dependencies = [ + "fastrand", + "futures-core", + "futures-io", + "parking", + "pin-project-lite", +] [[package]] name = "futures-macro" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ca545a94061b6365f2c7355b4b32bd20df3ff95f02da9329b34ccc3bd6ee72" +checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -2365,15 +2730,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f43be4fe21a13b9781a69afa4985b0f6ee0e1afab2c6f454a8cf30e2b2237b6e" +checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" [[package]] name = "futures-task" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d3d132be6c0e6aa1534069c705a74a5997a356c0dc2f86a47765e5617c5b65" +checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" @@ -2387,9 +2752,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.28" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26b01e40b772d54cf6c6d721c1d1abd0647a0106a12ecaa1c186273392a69533" +checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" dependencies = [ "futures 0.1.31", "futures-channel", @@ -2421,7 +2786,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.4.6", - "futures 0.3.28", + "futures 0.3.30", "serde", "serde_json", "serde_yaml", @@ -2452,6 +2817,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" 
+dependencies = [ + "rand 0.8.5", + "rand_core 0.6.4", +] + [[package]] name = "ghash" version = "0.5.0" @@ -2602,7 +2977,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19775995ee20209163239355bc3ad2f33f83da35d9ef72dea26e5af753552c87" dependencies = [ "dashmap", - "futures 0.3.28", + "futures 0.3.30", "futures-timer", "no-std-compat", "nonzero_ext", @@ -2709,6 +3084,7 @@ checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash 0.8.7", "allocator-api2", + "serde", ] [[package]] @@ -2756,6 +3132,12 @@ version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +[[package]] +name = "hermit-abi" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fbf6a919d6cf397374f7dfeeea91d974c7c0a7221d0d0f4f20d859d329e53fcc" + [[package]] name = "hex" version = "0.4.3" @@ -2768,7 +3150,17 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" dependencies = [ - "hmac", + "hmac 0.12.1", +] + +[[package]] +name = "hmac" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "126888268dcc288495a26bf004b38c5fdbb31682f992c84ceb046a1f0fe38840" +dependencies = [ + "crypto-mac", + "digest 0.9.0", ] [[package]] @@ -2780,6 +3172,17 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "hmac-drbg" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17ea0a1394df5b6574da6e0c1ade9e78868c9fb0a4e5ef4428e32da4676b85b1" +dependencies = [ + "digest 0.9.0", + "generic-array", + "hmac 0.8.1", +] + [[package]] name = "home" version = "0.5.5" @@ -2913,6 +3316,22 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-rustls" +version = "0.24.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" +dependencies = [ + "futures-util", + "http 0.2.9", + "hyper 0.14.29", + "log", + "rustls 0.21.12", + "rustls-native-certs 0.6.3", + "tokio", + "tokio-rustls 0.24.1", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2924,10 +3343,10 @@ dependencies = [ "hyper 1.3.1", "hyper-util", "log", - "rustls", + "rustls 0.23.10", "rustls-pki-types", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tower-service", ] @@ -3030,9 +3449,9 @@ checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" [[package]] name = "idna" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d20d6b07bfbc108882d88ed8e37d39636dcc260e15e30c45e6ba089610b917c" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" dependencies = [ "unicode-bidi", "unicode-normalization", @@ -3110,6 +3529,12 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "indexmap-nostd" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e04e2fd2b8188ea827b32ef11de88377086d690286ab35747ef7f9bf3ccb590" + [[package]] name = "inout" version = "0.1.3" @@ -3133,6 +3558,15 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "instant" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +dependencies 
= [ + "cfg-if 1.0.0", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -3210,24 +3644,57 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "jsonrpsee" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9579d0ca9fb30da026bac2f0f7d9576ec93489aeb7cd4971dd5b4617d82c79b2" +dependencies = [ + "jsonrpsee-client-transport 0.21.0", + "jsonrpsee-core 0.21.0", + "jsonrpsee-http-client 0.21.0", + "jsonrpsee-types 0.21.0", +] + [[package]] name = "jsonrpsee" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62b089779ad7f80768693755a031cc14a7766aba707cbe886674e3f79e9b7e47" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-http-client", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-http-client 0.23.2", "jsonrpsee-proc-macros", "jsonrpsee-server", - "jsonrpsee-types", + "jsonrpsee-types 0.23.2", "jsonrpsee-wasm-client", "jsonrpsee-ws-client", "tokio", "tracing", ] +[[package]] +name = "jsonrpsee-client-transport" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" +dependencies = [ + "futures-util", + "http 0.2.9", + "jsonrpsee-core 0.21.0", + "pin-project", + "rustls-native-certs 0.7.0", + "rustls-pki-types", + "soketto 0.7.1", + "thiserror", + "tokio", + "tokio-rustls 0.25.0", + "tokio-util", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-client-transport" version = "0.23.2" @@ -3239,20 +3706,44 @@ dependencies = [ "futures-util", "gloo-net", "http 1.1.0", - "jsonrpsee-core", + "jsonrpsee-core 0.23.2", "pin-project", - "rustls", + "rustls 0.23.10", "rustls-pki-types", "rustls-platform-verifier", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tokio-util", "tracing", "url", ] +[[package]] +name = "jsonrpsee-core" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "776d009e2f591b78c038e0d053a796f94575d66ca4e77dd84bfc5e81419e436c" +dependencies = [ + "anyhow", + "async-lock", + "async-trait", + "beef", + "futures-timer", + "futures-util", + "hyper 0.14.29", + "jsonrpsee-types 0.21.0", + "pin-project", + "rustc-hash", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + [[package]] name = "jsonrpsee-core" version = "0.23.2" @@ -3268,7 +3759,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", - "jsonrpsee-types", + "jsonrpsee-types 0.23.2", "parking_lot", "pin-project", "rand 0.8.5", @@ -3282,6 +3773,26 @@ dependencies = [ "wasm-bindgen-futures", ] +[[package]] +name = "jsonrpsee-http-client" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" +dependencies = [ + "async-trait", + "hyper 0.14.29", + "hyper-rustls 0.24.2", + "jsonrpsee-core 0.21.0", + "jsonrpsee-types 0.21.0", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "tracing", + "url", +] + [[package]] name = "jsonrpsee-http-client" version = "0.23.2" @@ -3292,11 +3803,11 @@ dependencies = [ "base64 0.22.1", "http-body 1.0.0", "hyper 1.3.1", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", - "rustls", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", + "rustls 0.23.10", "rustls-platform-verifier", "serde", "serde_json", @@ 
-3333,13 +3844,13 @@ dependencies = [ "http-body-util", "hyper 1.3.1", "hyper-util", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "pin-project", "route-recognizer", "serde", "serde_json", - "soketto", + "soketto 0.8.0", "thiserror", "tokio", "tokio-stream", @@ -3348,6 +3859,19 @@ dependencies = [ "tracing", ] +[[package]] +name = "jsonrpsee-types" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3266dfb045c9174b24c77c2dfe0084914bb23a6b2597d70c9dc6018392e1cd1b" +dependencies = [ + "anyhow", + "beef", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "jsonrpsee-types" version = "0.23.2" @@ -3367,9 +3891,9 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4727ac037f834c6f04c0912cada7532dbddb54e92fbc64e33d6cb8c24af313c9" dependencies = [ - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", ] [[package]] @@ -3379,9 +3903,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c28759775f5cb2f1ea9667672d3fe2b0e701d1f4b7b67954e60afe7fd058b5e" dependencies = [ "http 1.1.0", - "jsonrpsee-client-transport", - "jsonrpsee-core", - "jsonrpsee-types", + "jsonrpsee-client-transport 0.23.2", + "jsonrpsee-core 0.23.2", + "jsonrpsee-types 0.23.2", "url", ] @@ -3494,6 +4018,54 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "libsecp256k1" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95b09eff1b35ed3b33b877ced3a691fc7a481919c7e29c53c906226fcf55e2a1" +dependencies = [ + "arrayref", + "base64 0.13.1", + "digest 0.9.0", + "hmac-drbg", + "libsecp256k1-core", + "libsecp256k1-gen-ecmult", + "libsecp256k1-gen-genmult", + "rand 0.8.5", + "serde", + "sha2 0.9.9", + "typenum", +] + +[[package]] +name = "libsecp256k1-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be9b9bb642d8522a44d533eab56c16c738301965504753b03ad1de3425d5451" +dependencies = [ + "crunchy", + "digest 0.9.0", + "subtle", +] + +[[package]] +name = "libsecp256k1-gen-ecmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3038c808c55c87e8a172643a7d87187fc6c4174468159cb3090659d55bcb4809" +dependencies = [ + "libsecp256k1-core", +] + +[[package]] +name = "libsecp256k1-gen-genmult" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3db8d6ba2cec9eacc40e6e8ccc98931840301f1006e95647ceb2dd5c3aa06f7c" +dependencies = [ + "libsecp256k1-core", +] + [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -3535,7 +4107,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "futures 0.3.28", + "futures 0.3.30", "hex", "num", "once_cell", @@ -3613,6 +4185,9 @@ name = "lru" version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +dependencies = [ + "hashbrown 0.14.5", +] [[package]] name = "lz4-sys" @@ -3700,6 +4275,18 @@ dependencies = [ "zksync_vlog", ] +[[package]] +name = "merlin" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c38e2799fc0978b65dfff8023ec7843e2330bb462f19198840b34b6582397d" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.6.4", + "zeroize", +] + [[package]] name = 
"miette" version = "5.10.0" @@ -3845,6 +4432,12 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93853da6d84c2e3c7d730d6473e8817692dd89be387eb01b94d7f108ecb5b8c" +[[package]] +name = "no-std-net" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43794a0ace135be66a25d3ae77d41b91615fb68ae937f904090203e81f755b65" + [[package]] name = "nodrop" version = "0.1.14" @@ -4289,9 +4882,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.5" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dec8a8073036902368c2cdc0387e85ff9a37054d7e7c98e592145e0c92cd4fb" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -4303,11 +4896,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.5" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312270ee71e1cd70289dacf597cab7b207aa107d2f28191c2ae45b2ece18a260" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2 1.0.86", "quote 1.0.36", "syn 1.0.109", @@ -4348,6 +4941,15 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest 0.10.7", +] + [[package]] name = "peeking_take_while" version = "0.1.2" @@ -4375,9 +4977,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b2a4787296e9989611394c33f193f676704af1686e70b8f8033ab5ba9a35a94" +checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" @@ -4466,6 +5068,17 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "piper" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96c8c490f422ef9a4efd2cb5b42b76c8613d7e7dfc1caf667b8a3350a5acc066" +dependencies = [ + "atomic-waker", + "fastrand", + "futures-io", +] + [[package]] name = "pkcs1" version = "0.7.5" @@ -4531,6 +5144,21 @@ dependencies = [ "plotters-backend", ] +[[package]] +name = "polling" +version = "3.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" +dependencies = [ + "cfg-if 1.0.0", + "concurrent-queue", + "hermit-abi 0.4.0", + "pin-project-lite", + "rustix", + "tracing", + "windows-sys 0.59.0", +] + [[package]] name = "poly1305" version = "0.8.0" @@ -4605,6 +5233,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", + "scale-info", "uint", ] @@ -5029,13 +5658,13 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.2" +version = "1.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "380b951a9c5e80ddfd6136919eef32310721aa4aacd4889a8d39124b026ab343" +checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" 
dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.3", + "regex-automata 0.4.7", "regex-syntax 0.8.2", ] @@ -5050,9 +5679,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.3" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f804c7828047e88b2d32e2d7fe5a105da8ee3264f01902f796c8e067dc2483f" +checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", @@ -5135,7 +5764,7 @@ dependencies = [ "http-body 1.0.0", "http-body-util", "hyper 1.3.1", - "hyper-rustls", + "hyper-rustls 0.27.2", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -5147,7 +5776,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "serde", "serde_json", "serde_urlencoded", @@ -5208,7 +5837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" dependencies = [ "crypto-bigint 0.4.9", - "hmac", + "hmac 0.12.1", "zeroize", ] @@ -5218,7 +5847,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" dependencies = [ - "hmac", + "hmac 0.12.1", "subtle", ] @@ -5367,6 +5996,32 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "rustls" +version = "0.21.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" +dependencies = [ + "log", + "ring", + "rustls-webpki 0.101.7", + "sct", +] + +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring", + "rustls-pki-types", + "rustls-webpki 0.102.4", + "subtle", + "zeroize", +] + [[package]] name = "rustls" version = "0.23.10" @@ -5378,11 +6033,23 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" +dependencies = [ + "openssl-probe", + "rustls-pemfile 1.0.4", + "schannel", + "security-framework", +] + [[package]] name = "rustls-native-certs" version = "0.7.0" @@ -5390,12 +6057,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 2.0.0", "rustls-pki-types", "schannel", "security-framework", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" +dependencies = [ + "base64 0.21.5", +] + [[package]] name = "rustls-pemfile" version = "2.0.0" @@ -5423,10 +6099,10 @@ dependencies = [ "jni", "log", "once_cell", - "rustls", - "rustls-native-certs", + "rustls 0.23.10", + "rustls-native-certs 0.7.0", "rustls-platform-verifier-android", - "rustls-webpki", + "rustls-webpki 0.102.4", "security-framework", "security-framework-sys", "webpki-roots", @@ -5439,6 +6115,16 @@ version = "0.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" +[[package]] +name = "rustls-webpki" +version = "0.101.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "rustls-webpki" version = "0.102.4" @@ -5457,6 +6143,17 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +[[package]] +name = "ruzstd" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58c4eb8a81997cf040a091d1f7e1938aeab6749d3a0dfa73af43cdc32393483d" +dependencies = [ + "byteorder", + "derive_more 0.99.18", + "twox-hash", +] + [[package]] name = "ryu" version = "1.0.15" @@ -5472,6 +6169,132 @@ dependencies = [ "winapi-util", ] +[[package]] +name = "scale-bits" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "036575c29af9b6e4866ffb7fa055dbf623fe7a9cc159b33786de6013a6969d89" +dependencies = [ + "parity-scale-codec", + "scale-info", + "serde", +] + +[[package]] +name = "scale-decode" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7caaf753f8ed1ab4752c6afb20174f03598c664724e0e32628e161c21000ff76" +dependencies = [ + "derive_more 0.99.18", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-decode-derive", + "scale-info", + "smallvec", +] + +[[package]] +name = "scale-decode-derive" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3475108a1b62c7efd1b5c65974f30109a598b2f45f23c9ae030acb9686966db" +dependencies = [ + "darling 0.14.4", + "proc-macro-crate 1.3.1", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-encode" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d70cb4b29360105483fac1ed567ff95d65224a14dd275b6303ed0a654c78de5" +dependencies = [ + "derive_more 0.99.18", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-encode-derive", + "scale-info", + "smallvec", +] + +[[package]] +name = "scale-encode-derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "995491f110efdc6bea96d6a746140e32bfceb4ea47510750a5467295a4707a25" +dependencies = [ + "darling 0.14.4", + "proc-macro-crate 1.3.1", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-info" +version = "2.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" +dependencies = [ + "bitvec", + "cfg-if 1.0.0", + "derive_more 0.99.18", + "parity-scale-codec", + "scale-info-derive", + "serde", +] + +[[package]] +name = "scale-info-derive" +version = "2.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" +dependencies = [ + "proc-macro-crate 3.1.0", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + +[[package]] +name = "scale-typegen" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "00860983481ac590ac87972062909bef0d6a658013b592ccc0f2feb272feab11" +dependencies = [ + 
"proc-macro2 1.0.86", + "quote 1.0.36", + "scale-info", + "syn 2.0.72", + "thiserror", +] + +[[package]] +name = "scale-value" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58223c7691bf0bd46b43c9aea6f0472d1067f378d574180232358d7c6e0a8089" +dependencies = [ + "base58", + "blake2 0.10.6", + "derive_more 0.99.18", + "either", + "frame-metadata 15.1.0", + "parity-scale-codec", + "scale-bits", + "scale-decode", + "scale-encode", + "scale-info", + "serde", + "yap", +] + [[package]] name = "schannel" version = "0.1.22" @@ -5481,12 +6304,41 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "schnorrkel" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" +dependencies = [ + "aead", + "arrayref", + "arrayvec 0.7.4", + "curve25519-dalek", + "getrandom_or_panic", + "merlin", + "rand_core 0.6.4", + "serde_bytes", + "sha2 0.10.8", + "subtle", + "zeroize", +] + [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "sct" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "seahash" version = "4.1.0" @@ -5721,6 +6573,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_bytes" +version = "0.11.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "387cc504cb06bb40a96c8e04e951fe01854cf6bc921053c954e4a606d9675c6a" +dependencies = [ + "serde", +] + [[package]] name = "serde_derive" version = "1.0.208" @@ -5734,11 +6595,12 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.107" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b420ce6e3d8bd882e9b243c6eed35dbc9a6110c9769e74b584e0d68d1f20c65" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] @@ -5783,7 +6645,7 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", + "darling 0.13.4", "proc-macro2 1.0.86", "quote 1.0.36", "syn 1.0.109", @@ -5802,6 +6664,19 @@ dependencies = [ "unsafe-libyaml", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer 0.9.0", + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.9.0", + "opaque-debug", +] + [[package]] name = "sha1" version = "0.10.6" @@ -5948,6 +6823,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d" + [[package]] name = "sized-chunks" version = "0.6.5" @@ -5974,21 +6855,129 @@ dependencies = [ ] [[package]] -name = "slab" -version = "0.4.9" +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", 
+] + +[[package]] +name = "smallvec" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +dependencies = [ + "serde", +] + +[[package]] +name = "smol" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" +dependencies = [ + "async-channel", + "async-executor", + "async-fs", + "async-io", + "async-lock", + "async-net", + "async-process", + "blocking", + "futures-lite", +] + +[[package]] +name = "smoldot" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" dependencies = [ - "autocfg", + "arrayvec 0.7.4", + "async-lock", + "atomic-take", + "base64 0.21.5", + "bip39", + "blake2-rfc", + "bs58", + "chacha20", + "crossbeam-queue 0.3.11", + "derive_more 0.99.18", + "ed25519-zebra", + "either", + "event-listener 4.0.3", + "fnv", + "futures-lite", + "futures-util", + "hashbrown 0.14.5", + "hex", + "hmac 0.12.1", + "itertools 0.12.0", + "libm", + "libsecp256k1", + "merlin", + "no-std-net", + "nom", + "num-bigint 0.4.6", + "num-rational", + "num-traits", + "pbkdf2", + "pin-project", + "poly1305", + "rand 0.8.5", + "rand_chacha", + "ruzstd", + "schnorrkel", + "serde", + "serde_json", + "sha2 0.10.8", + "sha3 0.10.8", + "siphasher", + "slab", + "smallvec", + "soketto 0.7.1", + "twox-hash", + "wasmi", + "x25519-dalek", + "zeroize", ] [[package]] -name = "smallvec" -version = "1.13.1" +name = "smoldot-light" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" dependencies = [ + "async-channel", + "async-lock", + "base64 0.21.5", + "blake2-rfc", + "derive_more 0.99.18", + "either", + "event-listener 4.0.3", + "fnv", + "futures-channel", + "futures-lite", + "futures-util", + "hashbrown 0.14.5", + "hex", + "itertools 0.12.0", + "log", + "lru", + "no-std-net", + "parking_lot", + "pin-project", + "rand 0.8.5", + "rand_chacha", "serde", + "serde_json", + "siphasher", + "slab", + "smol", + "smoldot", + "zeroize", ] [[package]] @@ -5996,7 +6985,7 @@ name = "snapshots_creator" version = "0.1.0" dependencies = [ "anyhow", - "futures 0.3.28", + "futures 0.3.30", "rand 0.8.5", "structopt", "tokio", @@ -6037,6 +7026,21 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "soketto" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d1c5305e39e09653383c2c7244f2f78b3bcae37cf50c64cb4789c9f5096ec2" +dependencies = [ + "base64 0.13.1", + "bytes", + "futures 0.3.30", + "httparse", + "log", + "rand 0.8.5", + "sha-1", +] + [[package]] name = "soketto" version = "0.8.0" @@ -6045,7 +7049,7 @@ checksum = "37468c595637c10857701c990f93a40ce0e357cedb0953d1c26c8d8027f9bb53" dependencies = [ "base64 0.22.1", "bytes", - "futures 0.3.28", + "futures 0.3.30", "http 1.1.0", "httparse", "log", @@ -6053,6 +7057,20 @@ dependencies = [ "sha1", ] +[[package]] +name = "sp-core-hashing" +version = "15.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e0f4990add7b2cefdeca883c0efa99bb4d912cb2196120e1500c0cc099553b0" +dependencies = [ + "blake2b_simd", + 
"byteorder", + "digest 0.10.7", + "sha2 0.10.8", + "sha3 0.10.8", + "twox-hash", +] + [[package]] name = "spin" version = "0.9.8" @@ -6126,7 +7144,7 @@ dependencies = [ "crc", "crossbeam-queue 0.3.11", "either", - "event-listener", + "event-listener 5.3.1", "futures-channel", "futures-core", "futures-intrusive", @@ -6219,7 +7237,7 @@ dependencies = [ "generic-array", "hex", "hkdf", - "hmac", + "hmac 0.12.1", "itoa", "log", "md-5", @@ -6261,7 +7279,7 @@ dependencies = [ "futures-util", "hex", "hkdf", - "hmac", + "hmac 0.12.1", "home", "ipnetwork", "itoa", @@ -6342,6 +7360,12 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +[[package]] +name = "strsim" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + [[package]] name = "structopt" version = "0.3.26" @@ -6394,6 +7418,129 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +[[package]] +name = "subxt" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3323d5c27898b139d043dc1ee971f602f937b99354ee33ee933bd90e0009fbd" +dependencies = [ + "async-trait", + "base58", + "blake2 0.10.6", + "derivative", + "either", + "frame-metadata 16.0.0", + "futures 0.3.30", + "hex", + "impl-serde", + "instant", + "jsonrpsee 0.21.0", + "parity-scale-codec", + "primitive-types", + "scale-bits", + "scale-decode", + "scale-encode", + "scale-info", + "scale-value", + "serde", + "serde_json", + "sp-core-hashing", + "subxt-lightclient", + "subxt-macro", + "subxt-metadata", + "thiserror", + "tokio-util", + "tracing", + "url", +] + +[[package]] +name = "subxt-codegen" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d0e58c3f88651cff26aa52bae0a0a85f806a2e923a20eb438c16474990743ea" +dependencies = [ + "frame-metadata 16.0.0", + "heck 0.4.1", + "hex", + "jsonrpsee 0.21.0", + "parity-scale-codec", + "proc-macro2 1.0.86", + "quote 1.0.36", + "scale-info", + "scale-typegen", + "subxt-metadata", + "syn 2.0.72", + "thiserror", + "tokio", +] + +[[package]] +name = "subxt-lightclient" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecec7066ba7bc0c3608fcd1d0c7d9584390990cd06095b6ae4f114f74c4b8550" +dependencies = [ + "futures 0.3.30", + "futures-util", + "serde", + "serde_json", + "smoldot-light", + "thiserror", + "tokio", + "tokio-stream", + "tracing", +] + +[[package]] +name = "subxt-macro" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "365251668613323064803427af8c7c7bc366cd8b28e33639640757669dafebd5" +dependencies = [ + "darling 0.20.10", + "parity-scale-codec", + "proc-macro-error", + "quote 1.0.36", + "scale-typegen", + "subxt-codegen", + "syn 2.0.72", +] + +[[package]] +name = "subxt-metadata" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c02aca8d39a1f6c55fff3a8fd81557d30a610fedc1cef03f889a81bc0f8f0b52" +dependencies = [ + "frame-metadata 16.0.0", + "parity-scale-codec", + "scale-info", + "sp-core-hashing", + "thiserror", +] + +[[package]] +name = "subxt-signer" +version = "0.34.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f88a76a5d114bfae2f6f9cc1491c46173ecc3fb2b9e53948eb3c8d43d4b43ab5" +dependencies = [ + "bip39", + "hex", + "hmac 0.12.1", + "parity-scale-codec", + "pbkdf2", + "regex", + "schnorrkel", + "secrecy", + "sha2 0.10.8", + "sp-core-hashing", + "subxt", + "thiserror", + "zeroize", +] + [[package]] name = "syn" version = "0.15.44" @@ -6577,18 +7724,18 @@ checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" [[package]] name = "thiserror" -version = "1.0.50" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.50" +version = "1.0.63" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -6718,7 +7865,7 @@ dependencies = [ "pin-project-lite", "thiserror", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", ] [[package]] @@ -6760,13 +7907,34 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.24.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" +dependencies = [ + "rustls 0.21.12", + "tokio", +] + +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls", + "rustls 0.23.10", "rustls-pki-types", "tokio", ] @@ -6785,9 +7953,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.9" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d68074620f57a0b21594d9735eb2e98ab38b17f80d3fcb189fca266771ca60d" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -6795,7 +7963,6 @@ dependencies = [ "futures-sink", "pin-project-lite", "tokio", - "tracing", ] [[package]] @@ -7048,6 +8215,17 @@ dependencies = [ "termcolor", ] +[[package]] +name = "twox-hash" +version = "1.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" +dependencies = [ + "cfg-if 1.0.0", + "digest 0.10.7", + "static_assertions", +] + [[package]] name = "typenum" version = "1.17.0" @@ -7188,9 +8366,9 @@ dependencies = [ [[package]] name = "url" -version = "2.4.1" +version = "2.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "22784dbdf76fdde8af1aeda5622b546b422b6fc585325248a2bf9f5e41e94d6c" dependencies = [ "form_urlencoded", "idna", @@ -7440,6 +8618,46 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmi" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"77a8281d1d660cdf54c76a3efa9ddd0c270cada1383a995db3ccb43d166456c7" +dependencies = [ + "smallvec", + "spin", + "wasmi_arena", + "wasmi_core", + "wasmparser-nostd", +] + +[[package]] +name = "wasmi_arena" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "104a7f73be44570cac297b3035d76b169d6599637631cf37a1703326a0727073" + +[[package]] +name = "wasmi_core" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcf1a7db34bff95b85c261002720c00c3a6168256dcb93041d3fa2054d19856a" +dependencies = [ + "downcast-rs", + "libm", + "num-traits", + "paste", +] + +[[package]] +name = "wasmparser-nostd" +version = "0.100.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d5a015fe95f3504a94bb1462c717aae75253e39b9dd6c3fb1062c934535c64aa" +dependencies = [ + "indexmap-nostd", +] + [[package]] name = "web-sys" version = "0.3.64" @@ -7558,6 +8776,15 @@ dependencies = [ "windows-targets 0.52.6", ] +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + [[package]] name = "windows-targets" version = "0.42.2" @@ -7774,6 +9001,18 @@ dependencies = [ "tap", ] +[[package]] +name = "x25519-dalek" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277" +dependencies = [ + "curve25519-dalek", + "rand_core 0.6.4", + "serde", + "zeroize", +] + [[package]] name = "yaml-rust" version = "0.4.5" @@ -7789,6 +9028,12 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yap" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" + [[package]] name = "zerocopy" version = "0.7.31" @@ -8121,7 +9366,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "tempfile", "test-casing", @@ -8160,7 +9405,7 @@ dependencies = [ "circuit_sequencer_api 0.140.0", "circuit_sequencer_api 0.141.1", "circuit_sequencer_api 0.150.4", - "futures 0.3.28", + "futures 0.3.30", "itertools 0.10.5", "num_cpus", "rand 0.8.5", @@ -8314,7 +9559,7 @@ dependencies = [ "thiserror", "tls-listener", "tokio", - "tokio-rustls", + "tokio-rustls 0.26.0", "tracing", "vise", "zksync_concurrency", @@ -8428,7 +9673,7 @@ version = "0.1.0" dependencies = [ "anyhow", "ctrlc", - "futures 0.3.28", + "futures 0.3.30", "structopt", "tokio", "tracing", @@ -8521,13 +9766,41 @@ dependencies = [ "serde", ] +[[package]] +name = "zksync_da_clients" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "base58", + "blake2 0.10.6", + "blake2b_simd", + "flate2", + "futures 0.3.30", + "hex", + "jsonrpsee 0.23.2", + "parity-scale-codec", + "scale-encode", + "serde", + "serde_json", + "subxt-metadata", + "subxt-signer", + "tokio", + "tracing", + "zksync_config", + "zksync_da_client", + "zksync_env_config", + "zksync_object_store", + "zksync_types", +] + [[package]] name = "zksync_da_dispatcher" version = "0.1.0" dependencies = [ "anyhow", "chrono", - "futures 0.3.28", + "futures 0.3.30", "rand 0.8.5", "tokio", "tracing", @@ -8591,23 +9864,6 @@ dependencies = [ 
"zksync_basic_types", ] -[[package]] -name = "zksync_default_da_clients" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "flate2", - "serde", - "tracing", - "zksync_config", - "zksync_da_client", - "zksync_env_config", - "zksync_node_framework", - "zksync_object_store", - "zksync_types", -] - [[package]] name = "zksync_env_config" version = "0.1.0" @@ -8627,7 +9883,7 @@ dependencies = [ "assert_matches", "async-trait", "hex", - "jsonrpsee", + "jsonrpsee 0.23.2", "pretty_assertions", "rlp", "serde_json", @@ -8710,7 +9966,7 @@ dependencies = [ "async-trait", "clap 4.4.6", "envy", - "futures 0.3.28", + "futures 0.3.30", "rustc_version", "serde", "serde_json", @@ -8799,7 +10055,7 @@ version = "0.1.0" dependencies = [ "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "serde_json", "thiserror", @@ -8915,7 +10171,7 @@ dependencies = [ "assert_matches", "async-trait", "axum", - "futures 0.3.28", + "futures 0.3.30", "itertools 0.10.5", "once_cell", "reqwest 0.12.5", @@ -8996,7 +10252,7 @@ dependencies = [ "async-trait", "axum", "chrono", - "futures 0.3.28", + "futures 0.3.30", "governor", "hex", "http 1.1.0", @@ -9132,7 +10388,7 @@ dependencies = [ "assert_matches", "async-trait", "ctrlc", - "futures 0.3.28", + "futures 0.3.30", "pin-project-lite", "semver", "thiserror", @@ -9149,6 +10405,7 @@ dependencies = [ "zksync_contract_verification_server", "zksync_contracts", "zksync_da_client", + "zksync_da_clients", "zksync_da_dispatcher", "zksync_dal", "zksync_db_connection", @@ -9244,7 +10501,7 @@ dependencies = [ "assert_matches", "async-trait", "chrono", - "futures 0.3.28", + "futures 0.3.30", "once_cell", "serde", "serde_json", @@ -9445,7 +10702,7 @@ version = "0.1.0" dependencies = [ "anyhow", "clap 4.4.6", - "futures 0.3.28", + "futures 0.3.30", "serde_json", "tikv-jemallocator", "tokio", @@ -9456,7 +10713,7 @@ dependencies = [ "zksync_consensus_executor", "zksync_consensus_roles", "zksync_core_leftovers", - "zksync_default_da_clients", + "zksync_da_clients", "zksync_env_config", "zksync_eth_client", "zksync_metadata_calculator", @@ -9488,7 +10745,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "serde", "test-casing", "thiserror", @@ -9554,7 +10811,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", + "futures 0.3.30", "hex", "itertools 0.10.5", "once_cell", @@ -9693,7 +10950,7 @@ dependencies = [ "bincode", "blake2 0.10.6", "chrono", - "derive_more", + "derive_more 1.0.0-beta.6", "hex", "itertools 0.10.5", "num", @@ -9727,7 +10984,7 @@ dependencies = [ "assert_matches", "bigdecimal", "bincode", - "futures 0.3.28", + "futures 0.3.30", "hex", "num", "once_cell", @@ -9811,7 +11068,7 @@ dependencies = [ "async-trait", "backon", "dashmap", - "futures 0.3.28", + "futures 0.3.30", "once_cell", "rand 0.8.5", "serde", @@ -9843,12 +11100,12 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", - "futures 0.3.28", - "jsonrpsee", + "futures 0.3.30", + "jsonrpsee 0.23.2", "pin-project-lite", "rand 0.8.5", "rlp", - "rustls", + "rustls 0.23.10", "serde", "serde_json", "test-casing", diff --git a/Cargo.toml b/Cargo.toml index 075f5007be4c..84e8df61f096 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,6 +38,7 @@ members = [ "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", + "core/node/da_clients", # Libraries "core/lib/db_connection", "core/lib/zksync_core_leftovers", @@ -50,7 +51,6 @@ members = [ 
"core/lib/dal", "core/lib/env_config", "core/lib/da_client", - "core/lib/default_da_clients", "core/lib/eth_client", "core/lib/eth_signer", "core/lib/l1_contract_interface", @@ -196,6 +196,15 @@ trybuild = "1.0" vise = "0.2.0" vise-exporter = "0.2.0" +# DA clients' dependencies +# Avail +base58 = "0.2.0" +scale-encode = "0.5.0" +blake2b_simd = "1.0.2" +subxt-metadata = "0.34.0" +parity-scale-codec = { version = "3.6.9", default-features = false } +subxt-signer = { version = "0.34", default-features = false } + # Here and below: # We *always* pin the latest version of protocol to disallow accidental changes in the execution logic. # However, for the historical version of protocol crates, we have lax requirements. Otherwise, @@ -245,7 +254,6 @@ zksync_db_connection = { version = "0.1.0", path = "core/lib/db_connection" } zksync_env_config = { version = "0.1.0", path = "core/lib/env_config" } zksync_eth_client = { version = "0.1.0", path = "core/lib/eth_client" } zksync_da_client = { version = "0.1.0", path = "core/lib/da_client" } -zksync_default_da_clients = { version = "0.1.0", path = "core/lib/default_da_clients" } zksync_eth_signer = { version = "0.1.0", path = "core/lib/eth_signer" } zksync_health_check = { version = "0.1.0", path = "core/lib/health_check" } zksync_l1_contract_interface = { version = "0.1.0", path = "core/lib/l1_contract_interface" } @@ -279,6 +287,7 @@ zksync_commitment_generator = { version = "0.1.0", path = "core/node/commitment_ zksync_house_keeper = { version = "0.1.0", path = "core/node/house_keeper" } zksync_node_genesis = { version = "0.1.0", path = "core/node/genesis" } zksync_da_dispatcher = { version = "0.1.0", path = "core/node/da_dispatcher" } +zksync_da_clients = { version = "0.1.0", path = "core/node/da_clients" } zksync_eth_sender = { version = "0.1.0", path = "core/node/eth_sender" } zksync_node_db_pruner = { version = "0.1.0", path = "core/node/db_pruner" } zksync_node_fee_model = { version = "0.1.0", path = "core/node/fee_model" } diff --git a/core/bin/zksync_server/Cargo.toml b/core/bin/zksync_server/Cargo.toml index 72eff1384e2d..031183924064 100644 --- a/core/bin/zksync_server/Cargo.toml +++ b/core/bin/zksync_server/Cargo.toml @@ -21,7 +21,7 @@ zksync_utils.workspace = true zksync_types.workspace = true zksync_core_leftovers.workspace = true zksync_node_genesis.workspace = true -zksync_default_da_clients.workspace = true +zksync_da_clients.workspace = true # Consensus dependenices zksync_consensus_crypto.workspace = true diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 7e0ff0e49201..84898d6da067 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -17,9 +17,9 @@ use zksync_config::{ L1Secrets, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, Secrets, }, - ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, - EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, - ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig, + DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, + GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core_leftovers::{ temp_config_store::{decode_yaml_repr, TempConfigStore}, @@ -199,6 +199,7 @@ fn load_env_config() -> anyhow::Result { 
gas_adjuster_config: GasAdjusterConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_client_config: DAClientConfig::from_env().ok(), da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e2a0c5846b5d..069a7a799ab5 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -3,14 +3,13 @@ use anyhow::Context; use zksync_config::{ - configs::{eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, Secrets}, + configs::{ + da_client::DAClient, eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, + Secrets, + }, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; -use zksync_default_da_clients::{ - no_da::wiring_layer::NoDAClientWiringLayer, - object_store::{config::DAObjectStoreConfig, wiring_layer::ObjectStorageClientWiringLayer}, -}; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ tx_sender::{ApiContracts, TxSenderConfig}, @@ -28,6 +27,10 @@ use zksync_node_framework::{ commitment_generator::CommitmentGeneratorLayer, consensus::MainNodeConsensusLayer, contract_verification_api::ContractVerificationApiLayer, + da_clients::{ + avail::AvailWiringLayer, no_da::NoDAClientWiringLayer, + object_store::ObjectStorageClientWiringLayer, + }, da_dispatcher::DataAvailabilityDispatcherLayer, eth_sender::{EthTxAggregatorLayer, EthTxManagerLayer}, eth_watch::EthWatchLayer, @@ -500,16 +503,23 @@ impl MainNodeBuilder { Ok(self) } - fn add_no_da_client_layer(mut self) -> anyhow::Result { - self.node.add_layer(NoDAClientWiringLayer); - Ok(self) - } + fn add_da_client_layer(mut self) -> anyhow::Result { + let Some(da_client_config) = self.configs.da_client_config.clone() else { + tracing::warn!("No config for DA client, using the NoDA client"); + self.node.add_layer(NoDAClientWiringLayer); + return Ok(self); + }; + + match da_client_config.client { + DAClient::Avail(config) => { + self.node.add_layer(AvailWiringLayer::new(config)); + } + DAClient::ObjectStore(config) => { + self.node + .add_layer(ObjectStorageClientWiringLayer::new(config)); + } + } - #[allow(dead_code)] - fn add_object_storage_da_client_layer(mut self) -> anyhow::Result { - let object_store_config = DAObjectStoreConfig::from_env()?; - self.node - .add_layer(ObjectStorageClientWiringLayer::new(object_store_config.0)); Ok(self) } @@ -750,7 +760,7 @@ impl MainNodeBuilder { self = self.add_commitment_generator_layer()?; } Component::DADispatcher => { - self = self.add_no_da_client_layer()?.add_da_dispatcher_layer()?; + self = self.add_da_client_layer()?.add_da_dispatcher_layer()?; } Component::VmRunnerProtectiveReads => { self = self.add_vm_runner_protective_reads_layer()?; diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs new file mode 100644 index 000000000000..e8d119787912 --- /dev/null +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -0,0 +1,11 @@ +use serde::Deserialize; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailConfig { + pub api_node_url: String, + pub bridge_api_url: String, + pub seed: String, + pub app_id: u32, + pub timeout: usize, + pub max_retries: usize, +} 
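For orientation, the new `AvailConfig` above is the struct that both the env-based loader and the protobuf-based file config added later in this patch deserialize into. Below is a minimal sketch of building one by hand; the values mirror the `from_env_avail_client` test added further down in this patch, so the URLs are local test endpoints and the seed is the standard BIP-39 test mnemonic, not a real key.

use zksync_config::AvailConfig;

fn main() {
    // Illustrative test values only, taken from the env-config tests in this patch.
    let config = AvailConfig {
        api_node_url: "localhost:12345".to_string(),
        bridge_api_url: "localhost:54321".to_string(),
        seed: "bottom drive obey lake curtain smoke basket hold race lonely fit walk".to_string(),
        app_id: 1,
        timeout: 2,
        max_retries: 3,
    };
    println!("{config:?}"); // `AvailConfig` derives `Debug`
}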
diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs new file mode 100644 index 000000000000..38337438c10e --- /dev/null +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -0,0 +1,20 @@ +use serde::Deserialize; + +use crate::{AvailConfig, ObjectStoreConfig}; + +pub mod avail; + +pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; +pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; + +#[derive(Debug, Clone, PartialEq)] +pub struct DAClientConfig { + pub client: DAClient, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +#[serde(tag = "client")] +pub enum DAClient { + Avail(AvailConfig), + ObjectStore(ObjectStoreConfig), +} diff --git a/core/lib/config/src/configs/general.rs b/core/lib/config/src/configs/general.rs index 38ffd3d45fac..bb733510f77d 100644 --- a/core/lib/config/src/configs/general.rs +++ b/core/lib/config/src/configs/general.rs @@ -3,6 +3,7 @@ use crate::{ base_token_adjuster::BaseTokenAdjusterConfig, chain::{CircuitBreakerConfig, MempoolConfig, OperationsManagerConfig, StateKeeperConfig}, consensus::ConsensusConfig, + da_client::DAClientConfig, da_dispatcher::DADispatcherConfig, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, @@ -41,6 +42,7 @@ pub struct GeneralConfig { pub eth: Option, pub snapshot_creator: Option, pub observability: Option, + pub da_client_config: Option, pub da_dispatcher_config: Option, pub protective_reads_writer_config: Option, pub basic_witness_input_producer_config: Option, diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs index b213060f7ced..1ad503e0687f 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -5,6 +5,7 @@ pub use self::{ commitment_generator::CommitmentGeneratorConfig, contract_verifier::ContractVerifierConfig, contracts::{ContractsConfig, EcosystemContracts}, + da_client::{avail::AvailConfig, DAClientConfig}, da_dispatcher::DADispatcherConfig, database::{DBConfig, PostgresConfig}, eth_sender::{EthConfig, GasAdjusterConfig}, @@ -38,6 +39,7 @@ mod commitment_generator; pub mod consensus; pub mod contract_verifier; pub mod contracts; +pub mod da_client; pub mod da_dispatcher; pub mod database; pub mod en_config; diff --git a/core/lib/config/src/lib.rs b/core/lib/config/src/lib.rs index ae8288fa72ea..9191edc39822 100644 --- a/core/lib/config/src/lib.rs +++ b/core/lib/config/src/lib.rs @@ -1,9 +1,10 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] pub use crate::configs::{ - ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, - DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, - GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, + ApiConfig, AvailConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, ContractsConfig, + DAClientConfig, DADispatcherConfig, DBConfig, EthConfig, EthWatchConfig, + ExternalProofIntegrationApiConfig, GasAdjusterConfig, GenesisConfig, ObjectStoreConfig, + PostgresConfig, SnapshotsCreatorConfig, }; pub mod configs; diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index bc3b6025b15a..4a2858b9cbfc 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -12,8 +12,12 @@ use zksync_basic_types::{ use zksync_consensus_utils::EncodeDist; use zksync_crypto_primitives::K256PrivateKey; -use crate::configs::{ - self, eth_sender::PubdataSendingMode, 
external_price_api_client::ForcedPriceClientConfig, +use crate::{ + configs::{ + self, da_client::DAClient::Avail, eth_sender::PubdataSendingMode, + external_price_api_client::ForcedPriceClientConfig, + }, + AvailConfig, }; trait Sample { @@ -922,6 +926,21 @@ impl Distribution for EncodeDist { } } +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> configs::da_client::DAClientConfig { + configs::da_client::DAClientConfig { + client: Avail(AvailConfig { + api_node_url: self.sample(rng), + bridge_api_url: self.sample(rng), + seed: self.sample(rng), + app_id: self.sample(rng), + timeout: self.sample(rng), + max_retries: self.sample(rng), + }), + } + } +} + impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::da_dispatcher::DADispatcherConfig { configs::da_dispatcher::DADispatcherConfig { @@ -1121,6 +1140,7 @@ impl Distribution for EncodeDist { eth: self.sample(rng), snapshot_creator: self.sample(rng), observability: self.sample(rng), + da_client_config: self.sample(rng), da_dispatcher_config: self.sample(rng), protective_reads_writer_config: self.sample(rng), basic_witness_input_producer_config: self.sample(rng), diff --git a/core/lib/da_client/src/types.rs b/core/lib/da_client/src/types.rs index 2b15cbe905ed..e7e4453d727e 100644 --- a/core/lib/da_client/src/types.rs +++ b/core/lib/da_client/src/types.rs @@ -35,6 +35,12 @@ pub struct DispatchResponse { pub blob_id: String, } +impl From for DispatchResponse { + fn from(blob_id: String) -> Self { + DispatchResponse { blob_id } + } +} + /// `InclusionData` is the data needed to verify on L1 that a blob is included in the DA layer. #[derive(Default, Serialize)] pub struct InclusionData { diff --git a/core/lib/default_da_clients/README.md b/core/lib/default_da_clients/README.md deleted file mode 100644 index 17ced715b268..000000000000 --- a/core/lib/default_da_clients/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# Default DA Clients - -This crate contains the default implementations of the Data Availability clients. Default clients are maintained within -this repo because they are tightly coupled with the codebase, and would cause the circular dependency if they were to be -moved to the [hyperchain-da](https://github.com/matter-labs/hyperchain-da) repository. - -Currently, the following DataAvailability clients are implemented: - -- `NoDA client` that does not send or store any pubdata, it is needed to run the zkSync network in the "no-DA" mode - utilizing the DA framework. -- `Object Store client` that stores the pubdata in the Object Store(GCS). 
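The clients described in the README above all implement the `DataAvailabilityClient` trait from `zksync_da_client`, and the DA dispatcher drives them in two steps: dispatch a blob, then poll for its inclusion data. A rough usage sketch under that assumption follows; the helper name `publish_pubdata` is invented for illustration and is not part of this patch.

use zksync_da_client::{
    types::{DAError, DispatchResponse, InclusionData},
    DataAvailabilityClient,
};

// Sketch: dispatch one batch's pubdata, then ask for its inclusion data.
// `client` can be any implementation: NoDA, ObjectStore, or (after this patch) Avail.
async fn publish_pubdata(
    client: &dyn DataAvailabilityClient,
    batch_number: u32,
    pubdata: Vec<u8>,
) -> Result<Option<InclusionData>, DAError> {
    let DispatchResponse { blob_id } = client.dispatch_blob(batch_number, pubdata).await?;
    // `None` means the inclusion data is not available yet and the caller should retry.
    client.get_inclusion_data(&blob_id).await
}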
diff --git a/core/lib/default_da_clients/src/no_da/mod.rs b/core/lib/default_da_clients/src/no_da/mod.rs deleted file mode 100644 index 814cf30c2cbd..000000000000 --- a/core/lib/default_da_clients/src/no_da/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod client; -pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/object_store/config.rs b/core/lib/default_da_clients/src/object_store/config.rs deleted file mode 100644 index 285c39827c79..000000000000 --- a/core/lib/default_da_clients/src/object_store/config.rs +++ /dev/null @@ -1,12 +0,0 @@ -use zksync_config::ObjectStoreConfig; -use zksync_env_config::envy_load; - -#[derive(Debug)] -pub struct DAObjectStoreConfig(pub ObjectStoreConfig); - -impl DAObjectStoreConfig { - pub fn from_env() -> anyhow::Result { - let config = envy_load("object_store", "DA_CLIENT_OBJECT_STORE_")?; - Ok(Self(config)) - } -} diff --git a/core/lib/default_da_clients/src/object_store/mod.rs b/core/lib/default_da_clients/src/object_store/mod.rs deleted file mode 100644 index 1600941b0572..000000000000 --- a/core/lib/default_da_clients/src/object_store/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod client; -pub mod config; -mod types; -pub mod wiring_layer; diff --git a/core/lib/default_da_clients/src/object_store/types.rs b/core/lib/default_da_clients/src/object_store/types.rs deleted file mode 100644 index b8ec9303e71e..000000000000 --- a/core/lib/default_da_clients/src/object_store/types.rs +++ /dev/null @@ -1,38 +0,0 @@ -use std::io::{Read, Write}; - -use flate2::{read::GzDecoder, write::GzEncoder, Compression}; -use zksync_object_store::{Bucket, StoredObject, _reexports::BoxedError}; -use zksync_types::L1BatchNumber; - -/// Used as a wrapper for the pubdata to be stored in the GCS. -#[derive(Debug)] -pub struct StorablePubdata { - pub data: Vec, -} - -impl StoredObject for StorablePubdata { - const BUCKET: Bucket = Bucket::DataAvailability; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("l1_batch_{key}_pubdata.gzip") - } - - fn serialize(&self) -> Result, BoxedError> { - let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); - encoder.write_all(&self.data[..])?; - encoder.finish().map_err(From::from) - } - - fn deserialize(bytes: Vec) -> Result { - let mut decoder = GzDecoder::new(&bytes[..]); - let mut decompressed_bytes = Vec::new(); - decoder - .read_to_end(&mut decompressed_bytes) - .map_err(BoxedError::from)?; - - Ok(Self { - data: decompressed_bytes, - }) - } -} diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs new file mode 100644 index 000000000000..f2da3b83f18a --- /dev/null +++ b/core/lib/env_config/src/da_client.rs @@ -0,0 +1,115 @@ +use zksync_config::configs::da_client::{ + DAClient, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, OBJECT_STORE_CLIENT_CONFIG_NAME, +}; + +use crate::{envy_load, FromEnv}; + +impl FromEnv for DAClientConfig { + fn from_env() -> anyhow::Result { + let client_tag = std::env::var("DA_CLIENT")?; + let client = match client_tag.as_str() { + AVAIL_CLIENT_CONFIG_NAME => DAClient::Avail(envy_load("da_avail_config", "DA_")?), + OBJECT_STORE_CLIENT_CONFIG_NAME => { + DAClient::ObjectStore(envy_load("da_object_store", "DA_")?) 
+ } + _ => anyhow::bail!("Unknown DA client name: {}", client_tag), + }; + + Ok(Self { client }) + } +} + +#[cfg(test)] +mod tests { + use zksync_config::{ + configs::{ + da_client::{DAClient, DAClient::ObjectStore}, + object_store::ObjectStoreMode::GCS, + }, + AvailConfig, DAClientConfig, ObjectStoreConfig, + }; + + use super::*; + use crate::test_utils::EnvMutex; + + static MUTEX: EnvMutex = EnvMutex::new(); + + fn expected_object_store_da_client_config(url: String, max_retries: u16) -> DAClientConfig { + DAClientConfig { + client: ObjectStore(ObjectStoreConfig { + mode: GCS { + bucket_base_url: url, + }, + max_retries, + local_mirror_path: None, + }), + } + } + + #[test] + fn from_env_object_store() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="ObjectStore" + + DA_BUCKET_BASE_URL="sometestpath" + DA_MODE="GCS" + DA_MAX_RETRIES="5" + "#; + lock.set_env(config); + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_object_store_da_client_config("sometestpath".to_string(), 5) + ); + } + + fn expected_avail_da_layer_config( + api_node_url: &str, + bridge_api_url: &str, + seed: &str, + app_id: u32, + timeout: usize, + max_retries: usize, + ) -> DAClientConfig { + DAClientConfig { + client: DAClient::Avail(AvailConfig { + api_node_url: api_node_url.to_string(), + bridge_api_url: bridge_api_url.to_string(), + seed: seed.to_string(), + app_id, + timeout, + max_retries, + }), + } + } + + #[test] + fn from_env_avail_client() { + let mut lock = MUTEX.lock(); + let config = r#" + DA_CLIENT="Avail" + DA_API_NODE_URL="localhost:12345" + DA_BRIDGE_API_URL="localhost:54321" + DA_SEED="bottom drive obey lake curtain smoke basket hold race lonely fit walk" + DA_APP_ID="1" + DA_TIMEOUT="2" + DA_MAX_RETRIES="3" + "#; + + lock.set_env(config); + + let actual = DAClientConfig::from_env().unwrap(); + assert_eq!( + actual, + expected_avail_da_layer_config( + "localhost:12345", + "localhost:54321", + "bottom drive obey lake curtain smoke basket hold race lonely fit walk", + "1".parse::().unwrap(), + "2".parse::().unwrap(), + "3".parse::().unwrap(), + ) + ); + } +} diff --git a/core/lib/env_config/src/lib.rs b/core/lib/env_config/src/lib.rs index 8cfa7b58a31c..b72c2c5d5b94 100644 --- a/core/lib/env_config/src/lib.rs +++ b/core/lib/env_config/src/lib.rs @@ -32,6 +32,8 @@ mod test_utils; mod vm_runner; mod wallets; +mod da_client; + pub trait FromEnv: Sized { fn from_env() -> anyhow::Result; } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs new file mode 100644 index 000000000000..2009d32db17c --- /dev/null +++ b/core/lib/protobuf_config/src/da_client.rs @@ -0,0 +1,61 @@ +use anyhow::Context; +use zksync_config::{ + configs::{ + da_client::DAClient::{Avail, ObjectStore}, + {self}, + }, + AvailConfig, +}; +use zksync_protobuf::{required, ProtoRepr}; + +use crate::proto::{da_client as proto, object_store as object_store_proto}; + +impl ProtoRepr for proto::DataAvailabilityClient { + type Type = configs::DAClientConfig; + + fn read(&self) -> anyhow::Result { + let config = required(&self.config).context("config")?; + + let client = match config { + proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { + api_node_url: required(&conf.api_node_url) + .context("api_node_url")? + .clone(), + bridge_api_url: required(&conf.bridge_api_url) + .context("bridge_api_url")? 
+ .clone(), + seed: required(&conf.seed).context("seed")?.clone(), + app_id: *required(&conf.app_id).context("app_id")?, + timeout: *required(&conf.timeout).context("timeout")? as usize, + max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + }), + proto::data_availability_client::Config::ObjectStore(conf) => { + ObjectStore(object_store_proto::ObjectStore::read(conf)?) + } + }; + + Ok(configs::DAClientConfig { client }) + } + + fn build(this: &Self::Type) -> Self { + match &this.client { + Avail(config) => Self { + config: Some(proto::data_availability_client::Config::Avail( + proto::AvailConfig { + api_node_url: Some(config.api_node_url.clone()), + bridge_api_url: Some(config.bridge_api_url.clone()), + seed: Some(config.seed.clone()), + app_id: Some(config.app_id), + timeout: Some(config.timeout as u64), + max_retries: Some(config.max_retries as u64), + }, + )), + }, + ObjectStore(config) => Self { + config: Some(proto::data_availability_client::Config::ObjectStore( + object_store_proto::ObjectStore::build(config), + )), + }, + } + } +} diff --git a/core/lib/protobuf_config/src/general.rs b/core/lib/protobuf_config/src/general.rs index 87bca88db387..b73539a0897f 100644 --- a/core/lib/protobuf_config/src/general.rs +++ b/core/lib/protobuf_config/src/general.rs @@ -28,6 +28,7 @@ impl ProtoRepr for proto::GeneralConfig { eth: read_optional_repr(&self.eth), snapshot_creator: read_optional_repr(&self.snapshot_creator), observability: read_optional_repr(&self.observability), + da_client_config: read_optional_repr(&self.da_client), da_dispatcher_config: read_optional_repr(&self.da_dispatcher), protective_reads_writer_config: read_optional_repr(&self.protective_reads_writer), basic_witness_input_producer_config: read_optional_repr( @@ -76,6 +77,7 @@ impl ProtoRepr for proto::GeneralConfig { eth: this.eth.as_ref().map(ProtoRepr::build), snapshot_creator: this.snapshot_creator.as_ref().map(ProtoRepr::build), observability: this.observability.as_ref().map(ProtoRepr::build), + da_client: this.da_client_config.as_ref().map(ProtoRepr::build), da_dispatcher: this.da_dispatcher_config.as_ref().map(ProtoRepr::build), protective_reads_writer: this .protective_reads_writer_config diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index f4d0188ea20f..a4822edbe8e4 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -29,6 +29,7 @@ mod pruning; mod secrets; mod snapshots_creator; +mod da_client; mod external_price_api_client; mod external_proof_integration_api; mod prover_job_monitor; diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto new file mode 100644 index 000000000000..ef58fbcecb4f --- /dev/null +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; + +package zksync.config.da_client; + +import "zksync/config/object_store.proto"; + +message AvailConfig { + optional string api_node_url = 1; + optional string bridge_api_url = 2; + optional string seed = 3; + optional uint32 app_id = 4; + optional uint64 timeout = 5; + optional uint64 max_retries = 6; +} + +message DataAvailabilityClient { + // oneof in protobuf allows for None + oneof config { + AvailConfig avail = 1; + object_store.ObjectStore object_store = 2; + } +} diff --git a/core/lib/protobuf_config/src/proto/config/general.proto b/core/lib/protobuf_config/src/proto/config/general.proto index 3595468949b1..ee70b61b18b3 100644 
--- a/core/lib/protobuf_config/src/proto/config/general.proto +++ b/core/lib/protobuf_config/src/proto/config/general.proto @@ -25,6 +25,7 @@ import "zksync/config/external_price_api_client.proto"; import "zksync/config/external_proof_integration_api.proto"; import "zksync/core/consensus.proto"; import "zksync/config/prover_job_monitor.proto"; +import "zksync/config/da_client.proto"; message GeneralConfig { optional database.Postgres postgres = 1; @@ -60,4 +61,5 @@ message GeneralConfig { optional external_proof_integration_api.ExternalProofIntegrationApi external_proof_integration_api = 43; optional experimental.Vm experimental_vm = 44; optional prover_job_monitor.ProverJobMonitor prover_job_monitor = 45; + optional da_client.DataAvailabilityClient da_client = 46; } diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index 695f404f64d1..d653b9b92bfd 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -42,6 +42,7 @@ fn test_encoding() { test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); + test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); test_encode_all_formats::>(rng); diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index 8224b03da071..2d6af705f482 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -18,8 +18,8 @@ use zksync_config::{ GeneralConfig, ObservabilityConfig, PrometheusConfig, ProofDataHandlerConfig, ProtectiveReadsWriterConfig, ProverJobMonitorConfig, PruningConfig, SnapshotRecoveryConfig, }, - ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DADispatcherConfig, DBConfig, - EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, + ApiConfig, BaseTokenAdjusterConfig, ContractVerifierConfig, DAClientConfig, DADispatcherConfig, + DBConfig, EthConfig, EthWatchConfig, ExternalProofIntegrationApiConfig, GasAdjusterConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_env_config::FromEnv; @@ -68,6 +68,7 @@ pub struct TempConfigStore { pub gas_adjuster_config: Option, pub observability: Option, pub snapshot_creator: Option, + pub da_client_config: Option, pub da_dispatcher_config: Option, pub protective_reads_writer_config: Option, pub basic_witness_input_producer_config: Option, @@ -105,6 +106,7 @@ impl TempConfigStore { eth: self.eth_sender_config.clone(), snapshot_creator: self.snapshot_creator.clone(), observability: self.observability.clone(), + da_client_config: self.da_client_config.clone(), da_dispatcher_config: self.da_dispatcher_config.clone(), protective_reads_writer_config: self.protective_reads_writer_config.clone(), basic_witness_input_producer_config: self.basic_witness_input_producer_config.clone(), @@ -188,6 +190,7 @@ fn load_env_config() -> anyhow::Result { gas_adjuster_config: GasAdjusterConfig::from_env().ok(), observability: ObservabilityConfig::from_env().ok(), snapshot_creator: SnapshotsCreatorConfig::from_env().ok(), + da_client_config: DAClientConfig::from_env().ok(), da_dispatcher_config: DADispatcherConfig::from_env().ok(), protective_reads_writer_config: ProtectiveReadsWriterConfig::from_env().ok(), basic_witness_input_producer_config: BasicWitnessInputProducerConfig::from_env().ok(), diff --git a/core/lib/default_da_clients/Cargo.toml 
b/core/node/da_clients/Cargo.toml similarity index 51% rename from core/lib/default_da_clients/Cargo.toml rename to core/node/da_clients/Cargo.toml index 737d209aed31..60b65067f48d 100644 --- a/core/lib/default_da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -1,6 +1,6 @@ [package] -name = "zksync_default_da_clients" -description = "ZKsync DA client implementations" +name = "zksync_da_clients" +description = "ZKsync data availability clients" version.workspace = true edition.workspace = true authors.workspace = true @@ -16,10 +16,24 @@ tracing.workspace = true async-trait.workspace = true anyhow.workspace = true flate2.workspace = true +tokio.workspace = true zksync_config.workspace = true zksync_types.workspace = true zksync_object_store.workspace = true zksync_da_client.workspace = true -zksync_node_framework.workspace = true zksync_env_config.workspace = true +futures.workspace = true + +# Avail dependencies +scale-encode.workspace = true +subxt-metadata.workspace = true +blake2.workspace = true +base58.workspace = true +serde_json.workspace = true +hex.workspace = true +blake2b_simd.workspace = true + +jsonrpsee = { workspace = true, features = ["ws-client"] } +parity-scale-codec = { workspace = true, features = ["derive"] } +subxt-signer = { workspace = true, features = ["sr25519", "native"] } diff --git a/core/node/da_clients/README.md b/core/node/da_clients/README.md new file mode 100644 index 000000000000..df06cef24197 --- /dev/null +++ b/core/node/da_clients/README.md @@ -0,0 +1,10 @@ +# Data Availability Clients + +This crate contains the implementations of the Data Availability clients. + +Currently, the following Data Availability clients are implemented: + +- `NoDA client` that does not send or store any pubdata; it is needed to run the zkSync network in the "no-DA" mode + utilizing the DA framework. +- `Object Store client` that stores the pubdata in the Object Store (GCS). +- `Avail` that sends the pubdata to the Avail DA layer. diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs new file mode 100644 index 000000000000..021906d73a01 --- /dev/null +++ b/core/node/da_clients/src/avail/client.rs @@ -0,0 +1,85 @@ +use std::{fmt::Debug, sync::Arc}; + +use async_trait::async_trait; +use jsonrpsee::ws_client::WsClientBuilder; +use zksync_config::AvailConfig; +use zksync_da_client::{ + types::{DAError, DispatchResponse, InclusionData}, + DataAvailabilityClient, +}; + +use crate::avail::sdk::RawAvailClient; + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network.
+#[derive(Debug, Clone)] +pub struct AvailClient { + config: AvailConfig, + sdk_client: Arc<RawAvailClient>, +} + +impl AvailClient { + pub async fn new(config: AvailConfig) -> anyhow::Result<Self> { + let sdk_client = RawAvailClient::new(config.app_id, config.seed.clone()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(sdk_client), + }) + } +} + +#[async_trait] +impl DataAvailabilityClient for AvailClient { + async fn dispatch_blob( + &self, + _: u32, // batch_number + data: Vec<u8>, + ) -> anyhow::Result<DispatchResponse, DAError> { + let client = WsClientBuilder::default() + .build(self.config.api_node_url.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + + let extrinsic = self + .sdk_client + .build_extrinsic(&client, data) + .await + .map_err(to_non_retriable_da_error)?; + + let block_hash = self + .sdk_client + .submit_extrinsic(&client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = self + .sdk_client + .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + + async fn get_inclusion_data( + &self, + _blob_id: &str, + ) -> anyhow::Result<Option<InclusionData>, DAError> { + // TODO: implement inclusion data retrieval + Ok(Some(InclusionData { data: vec![] })) + } + + fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> { + Box::new(self.clone()) + } + + fn blob_size_limit(&self) -> Option<usize> { + Some(RawAvailClient::MAX_BLOB_SIZE) + } +} + +pub fn to_non_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError { + DAError { + error: error.into(), + is_retriable: false, + } +} diff --git a/core/node/da_clients/src/avail/mod.rs b/core/node/da_clients/src/avail/mod.rs new file mode 100644 index 000000000000..82073448ba15 --- /dev/null +++ b/core/node/da_clients/src/avail/mod.rs @@ -0,0 +1,4 @@ +mod client; +mod sdk; + +pub use self::client::AvailClient; diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs new file mode 100644 index 000000000000..5e67540fcc69 --- /dev/null +++ b/core/node/da_clients/src/avail/sdk.rs @@ -0,0 +1,371 @@ +//! Minimal reimplementation of the Avail SDK client required for the DA client implementation. +//! This is considered to be a temporary solution until a mature SDK is available on crates.io + +use std::fmt::Debug; + +use jsonrpsee::{ + core::client::{Client, ClientT, Subscription, SubscriptionClientT}, + rpc_params, +}; +use parity_scale_codec::{Compact, Decode, Encode}; +use scale_encode::EncodeAsFields; +use subxt_signer::{ + bip39::Mnemonic, + sr25519::{Keypair, Signature}, +}; + +use crate::avail::client::to_non_retriable_da_error; + +const PROTOCOL_VERSION: u8 = 4; + +/// A raw, low-level client for interacting with the Avail network over JSON-RPC.
+#[derive(Debug, Clone)] +pub(crate) struct RawAvailClient { + app_id: u32, + keypair: Keypair, +} + +/// Utility type needed for encoding the call data +#[derive(parity_scale_codec::Encode, scale_encode::EncodeAsType)] +#[encode_as_type(crate_path = "scale_encode")] +struct SubmitData { + pub data: BoundedVec<u8>, +} + +/// Utility type needed for encoding the call data +#[derive(parity_scale_codec::Encode, scale_encode::EncodeAsType)] +#[encode_as_type(crate_path = "scale_encode")] +struct BoundedVec<_0>(pub Vec<_0>); + +impl RawAvailClient { + pub(crate) const MAX_BLOB_SIZE: usize = 512 * 1024; // 512kb + + pub(crate) async fn new(app_id: u32, seed: String) -> anyhow::Result<Self> { + let mnemonic = Mnemonic::parse(seed)?; + let keypair = Keypair::from_phrase(&mnemonic, None)?; + + Ok(Self { app_id, keypair }) + } + + /// Returns a hex-encoded extrinsic + pub(crate) async fn build_extrinsic( + &self, + client: &Client, + data: Vec<u8>, + ) -> anyhow::Result<String> { + let call_data = self + .get_encoded_call(client, data) + .await + .map_err(to_non_retriable_da_error)?; + let extra_params = self + .get_extended_params(client) + .await + .map_err(to_non_retriable_da_error)?; + let additional_params = self + .get_additional_params(client) + .await + .map_err(to_non_retriable_da_error)?; + + let signature = self.get_signature( + call_data.as_slice(), + extra_params.as_slice(), + additional_params.as_slice(), + ); + + let ext = self.get_submittable_extrinsic( + signature, + extra_params.as_slice(), + call_data.as_slice(), + ); + + Ok(hex::encode(&ext)) + } + + /// Returns the encoded call data + async fn get_encoded_call( + &self, + client: &Client, + data: Vec<u8>, + ) -> anyhow::Result<Vec<u8>, anyhow::Error> { + let resp: serde_json::Value = client.request("state_getMetadata", rpc_params![]).await?; + + let resp = resp + .as_str() + .ok_or_else(|| anyhow::anyhow!("Invalid metadata"))?
+ .to_string(); + + let metadata_bytes = hex::decode( + resp.strip_prefix("0x") + .ok_or_else(|| anyhow::anyhow!("Metadata doesn't have 0x prefix"))?, + )?; + let meta = subxt_metadata::Metadata::decode(&mut &metadata_bytes[..])?; + + let pallet = meta + .pallet_by_name("DataAvailability") + .ok_or_else(|| anyhow::anyhow!("DataAvailability pallet not found"))?; + + let call = pallet + .call_variant_by_name("submit_data") + .ok_or_else(|| anyhow::anyhow!("submit_data call not found"))?; + + let mut fields = call + .fields + .iter() + .map(|f| scale_encode::Field::new(f.ty.id, f.name.as_deref())); + + let mut bytes = Vec::new(); + pallet.index().encode_to(&mut bytes); + call.index.encode_to(&mut bytes); + + SubmitData { + data: BoundedVec(data), + } + .encode_as_fields_to(&mut fields, meta.types(), &mut bytes)?; + + Ok(bytes) + } + + /// Queries a node for a nonce + async fn fetch_account_nonce(&self, client: &Client) -> anyhow::Result<u64> { + let address = to_addr(self.keypair.clone()); + let resp: serde_json::Value = client + .request("system_accountNextIndex", rpc_params![address]) + .await?; + + let nonce = resp + .as_u64() + .ok_or_else(|| anyhow::anyhow!("Invalid nonce"))?; + + Ok(nonce) + } + + /// Returns the Compact-encoded extended extrinsic parameters + /// Extrinsic params used here: + /// - CheckMortality + /// - CheckNonce + /// - ChargeTransactionPayment + /// - CheckAppId + async fn get_extended_params(&self, client: &Client) -> anyhow::Result<Vec<u8>> { + let era = 0u8; // immortal era + let tip = 0u128; // no tip + let nonce = self.fetch_account_nonce(client).await?; + + // Encode the params + let mut bytes = vec![era]; + Compact(nonce).encode_to(&mut bytes); + Compact(tip).encode_to(&mut bytes); + Compact(self.app_id).encode_to(&mut bytes); + + Ok(bytes) + } + + /// Returns the Compact-encoded additional extrinsic parameters + /// Extrinsic params used here: + /// - CheckSpecVersion + /// - CheckTxVersion + /// - CheckGenesis + async fn get_additional_params(&self, client: &Client) -> anyhow::Result<Vec<u8>> { + let (spec_version, tx_version) = self.get_runtime_version(client).await?; + let genesis_hash = self.fetch_genesis_hash(client).await?; + + let mut bytes = Vec::new(); + spec_version.encode_to(&mut bytes); + tx_version.encode_to(&mut bytes); + // adding genesis hash twice (that's what API requires ¯\_(ツ)_/¯) + bytes.extend(hex::decode(&genesis_hash)?); + bytes.extend(hex::decode(&genesis_hash)?); + + Ok(bytes) + } + + /// Returns the specification and transaction versions of a runtime + async fn get_runtime_version(&self, client: &Client) -> anyhow::Result<(u32, u32)> { + let resp: serde_json::Value = client + .request("chain_getRuntimeVersion", rpc_params![]) + .await?; + + let sv = resp + .get("specVersion") + .ok_or_else(|| anyhow::anyhow!("Invalid runtime version"))?; + let tv = resp + .get("transactionVersion") + .ok_or_else(|| anyhow::anyhow!("Invalid runtime version"))?; + + let spec_version = sv + .as_u64() + .ok_or_else(|| anyhow::anyhow!("Invalid spec version"))?; + let transaction_version = tv + .as_u64() + .ok_or_else(|| anyhow::anyhow!("Invalid transaction version"))?; + + Ok((spec_version as u32, transaction_version as u32)) + } + + async fn fetch_genesis_hash(&self, client: &Client) -> anyhow::Result<String> { + let resp: serde_json::Value = client.request("chain_getBlockHash", rpc_params![0]).await?; + + let genesis_hash = resp + .as_str() + .ok_or_else(|| anyhow::anyhow!("Invalid genesis hash"))?; + + Ok(genesis_hash + .strip_prefix("0x") + .ok_or_else(|| anyhow::anyhow!("Genesis
hash doesn't have a 0x prefix"))? + .to_string()) + } + + /// Returns a signature for a partially-encoded extrinsic + fn get_signature( + &self, + call_data: &[u8], + extra_params: &[u8], + additional_params: &[u8], + ) -> Signature { + let mut bytes = vec![]; + bytes.extend_from_slice(call_data); + bytes.extend_from_slice(extra_params); + bytes.extend_from_slice(additional_params); + + if bytes.len() > 256 { + bytes = blake2::<32>(bytes).to_vec(); + } + + self.keypair.sign(&bytes) + } + + /// Encodes all the components of an extrinsic into a single vector + fn get_submittable_extrinsic( + &self, + signature: Signature, + extra_params: &[u8], + call_data: &[u8], + ) -> Vec<u8> { + let mut encoded_inner = Vec::new(); + (0b10000000 + PROTOCOL_VERSION).encode_to(&mut encoded_inner); // "is signed" + transaction protocol version + + // sender + encoded_inner.push(0); // 0 as an id param in MultiAddress enum + self.keypair.public_key().0.encode_to(&mut encoded_inner); // from address for signature + + // signature + encoded_inner.push(1); // 1 as an Sr25519 in MultiSignature enum + signature.0.encode_to(&mut encoded_inner); + + // extra params + encoded_inner.extend_from_slice(extra_params); + + // call data + encoded_inner.extend_from_slice(call_data); + + // now, prefix with byte length: + let len = Compact( + u32::try_from(encoded_inner.len()).expect("extrinsic size expected to be <4GB"), + ); + let mut encoded = Vec::new(); + len.encode_to(&mut encoded); + encoded.extend(encoded_inner); + + encoded + } + + /// Submits an extrinsic. Subscribes to a stream and waits for a tx to be included in a block + /// to return the block hash + pub(crate) async fn submit_extrinsic( + &self, + client: &Client, + extrinsic: &str, + ) -> anyhow::Result<String> { + let mut sub: Subscription<serde_json::Value> = client + .subscribe( + "author_submitAndWatchExtrinsic", + rpc_params![extrinsic], + "author_unwatchExtrinsic", + ) + .await?; + + let block_hash = loop { + let status = sub.next().await.transpose()?; + + if status.is_some() && status.as_ref().unwrap().is_object() { + if let Some(block_hash) = status.unwrap().get("inBlock") { + break block_hash + .as_str() + .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? + .strip_prefix("0x") + .ok_or_else(|| anyhow::anyhow!("Block hash doesn't have 0x prefix"))? + .to_string(); + } + } + }; + sub.unsubscribe().await?; + + Ok(block_hash) + } + + /// Iterates over all transactions in the block and finds the ID of the one provided as an argument + pub(crate) async fn get_tx_id( + &self, + client: &Client, + block_hash: &str, + hex_ext: &str, + ) -> anyhow::Result<usize> { + let resp: serde_json::Value = client + .request("chain_getBlock", rpc_params![block_hash]) + .await?; + + let block = resp + .get("block") + .ok_or_else(|| anyhow::anyhow!("Invalid block"))?; + let extrinsics = block + .get("extrinsics") + .ok_or_else(|| anyhow::anyhow!("No field named extrinsics in block"))?
+ .as_array() + .ok_or_else(|| anyhow::anyhow!("Extrinsics field is not an array"))?; + + let hex_ext = format!("0x{}", hex_ext); + + let tx_id = extrinsics + .iter() + .position(|extrinsic| extrinsic.as_str() == Some(hex_ext.as_str())) + .ok_or_else(|| anyhow::anyhow!("Extrinsic not found in block"))?; + + Ok(tx_id) + } +} + +fn blake2<const N: usize>(data: Vec<u8>) -> [u8; N] { + blake2b_simd::Params::new() + .hash_length(N) + .hash(data.as_slice()) + .as_bytes() + .try_into() + .expect("slice is always the necessary length") +} + +// Taken from subxt accountId implementation +fn to_addr(keypair: Keypair) -> String { + // For serializing to a string to obtain the account nonce, we use the default substrate + // prefix (since we have no way to otherwise pick one). It doesn't really matter, since when + // it's deserialized back in system_accountNextIndex, we ignore this (so long as it's valid). + const SUBSTRATE_SS58_PREFIX: u8 = 42; + // prefix <= 63 just take up one byte at the start: + let mut v = vec![SUBSTRATE_SS58_PREFIX]; + // then push the account ID bytes. + v.extend(keypair.public_key().0); + // then push a 2 byte checksum of what we have so far. + let r = ss58hash(&v); + v.extend(&r[0..2]); + // then encode to base58. + use base58::ToBase58; + v.to_base58() +} + +// Taken from subxt accountId implementation +fn ss58hash(data: &[u8]) -> Vec<u8> { + use blake2::{Blake2b512, Digest}; + const PREFIX: &[u8] = b"SS58PRE"; + let mut ctx = Blake2b512::new(); + ctx.update(PREFIX); + ctx.update(data); + ctx.finalize().to_vec() +} diff --git a/core/lib/default_da_clients/src/lib.rs b/core/node/da_clients/src/lib.rs similarity index 71% rename from core/lib/default_da_clients/src/lib.rs rename to core/node/da_clients/src/lib.rs index 3aa2a18cdcec..48311ce4c3f2 100644 --- a/core/lib/default_da_clients/src/lib.rs +++ b/core/node/da_clients/src/lib.rs @@ -1,2 +1,3 @@ +pub mod avail; pub mod no_da; pub mod object_store; diff --git a/core/lib/default_da_clients/src/no_da/client.rs b/core/node/da_clients/src/no_da.rs similarity index 100% rename from core/lib/default_da_clients/src/no_da/client.rs rename to core/node/da_clients/src/no_da.rs diff --git a/core/lib/default_da_clients/src/object_store/client.rs b/core/node/da_clients/src/object_store.rs similarity index 51% rename from core/lib/default_da_clients/src/object_store/client.rs rename to core/node/da_clients/src/object_store.rs index f05029a8eb1c..55764e8260e0 100644 --- a/core/lib/default_da_clients/src/object_store/client.rs +++ b/core/node/da_clients/src/object_store.rs @@ -1,16 +1,20 @@ -use std::sync::Arc; +use std::{ + io::{Read, Write}, + sync::Arc, +}; use async_trait::async_trait; +use flate2::{read::GzDecoder, write::GzEncoder, Compression}; use zksync_config::ObjectStoreConfig; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; -use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_object_store::{ + Bucket, ObjectStore, ObjectStoreFactory, StoredObject, _reexports::BoxedError, +}; use zksync_types::L1BatchNumber; -use crate::object_store::types::StorablePubdata; - /// An implementation of the `DataAvailabilityClient` trait that stores the pubdata in the GCS. #[derive(Clone, Debug)] pub struct ObjectStoreDAClient { @@ -84,3 +88,79 @@ impl DataAvailabilityClient for ObjectStoreDAClient { None } } + +/// Used as a wrapper for the pubdata to be stored in the GCS.
+#[derive(Debug)] +struct StorablePubdata { + pub data: Vec, +} + +impl StoredObject for StorablePubdata { + const BUCKET: Bucket = Bucket::DataAvailability; + type Key<'a> = L1BatchNumber; + + fn encode_key(key: Self::Key<'_>) -> String { + format!("l1_batch_{key}_pubdata.gzip") + } + + fn serialize(&self) -> Result, BoxedError> { + let mut encoder = GzEncoder::new(Vec::new(), Compression::default()); + encoder.write_all(&self.data[..])?; + encoder.finish().map_err(From::from) + } + + fn deserialize(bytes: Vec) -> Result { + let mut decoder = GzDecoder::new(&bytes[..]); + let mut decompressed_bytes = Vec::new(); + decoder + .read_to_end(&mut decompressed_bytes) + .map_err(BoxedError::from)?; + + Ok(Self { + data: decompressed_bytes, + }) + } +} + +#[cfg(test)] +mod tests { + use tokio::fs; + use zksync_object_store::{MockObjectStore, StoredObject}; + use zksync_types::L1BatchNumber; + + use super::StorablePubdata; + + #[tokio::test] + async fn test_storable_pubdata_deserialization() { + let serialized = fs::read("./src/test_data/l1_batch_123_pubdata.gzip") + .await + .unwrap(); + + let data = StorablePubdata::deserialize(serialized).unwrap().data; + assert_eq!(data[12], 0); + assert_eq!(data[123], 129); + assert_eq!(data[1234], 153); + } + + #[tokio::test] + async fn stored_object_serialization() { + let batch_number = 123; + let data = vec![1, 2, 3, 4, 5, 6, 123, 255, 0, 0]; + + let store = MockObjectStore::arc(); + store + .put( + L1BatchNumber(batch_number), + &StorablePubdata { data: data.clone() }, + ) + .await + .unwrap(); + + let resp = store + .get::(L1BatchNumber(batch_number)) + .await + .unwrap(); + + assert_eq!(data, resp.data); + } +} diff --git a/core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip b/core/node/da_clients/src/test_data/l1_batch_123_pubdata.gzip new file mode 100644 index 0000000000000000000000000000000000000000..78bc67e94efb969de7038b3cdd957b3eef1eb228 GIT binary patch literal 2511 zcmV;=2{85_iwFP!00000|BcmWRFlaX0N@l5B1l4&-kV5~CIS)yf}!_7B!HAi30*^z zUV=nWnv?|>h#(>z#04SL+^ZohGnKRG4@600#mIg?H|A4f=+US_+5v!wJX4o@_K5!2QtA6b~>TH z)WK&52&$aVBNn!Q!v&w@n#zZem3g3`Ct>#nmznTmTeDG@HuUDnd1jSy<7vo`zu{7y zk^ffxFB$tfn!%orB~ zdfvc(!=*XN6)Yza()!3kA+Ttjt$W6QdSJc~qUf`)j+^{-D`tfu`Cr}t{?JmW05Ucn zc%9D7O0E~GPq=be^5@d|K-g(q;DuKjOB!9&BZTAa@MDWBNfk_IMN%ie+3|!=xy8|8 zd#edP8bj)O%xev1(<{}_p>f!(ndVTGRo)>nSS00@Te-V;ZgUqu={OTx-N6i(I#=0H zGd#g|7I$nvnS`#JeNWH!@UR_sp&}Ms@X*p^5`OIxOmi zZ`%{Elox~juv)9*2oBL&`pajXrQSN$N9>BQrjkDWPKk!wQIR z-CxwYRtsWlqOz3$bxj4+P!_EgKhb18$RK;oa<=lNQI%Y#c5Xs2yNtwsh-PHa;Pq}c z5xXqc{I9!EJ}=n8@&YKrsyQna0ppd*K)DzuzS9={o+W8GRpO9H{Pl9mB@^^#k5tD8am<+#ZuHorCFgrY0slc$!8n*pCIAEXx#Bg0uL z*u_c_7a_!7E-Hfbp^ju@BunnfXdoB7S5RWB)DGBFM6;-CdK<-I?+yWW(>II5>f0$5 zLh=D;k3>+4yLkuo?8t{@+f1&Y{&{f-(C1P<3*1KHG&314l28f_!)1FAR~qO$t7yu{ zyCWTvAV88s^`#+Uv-a&i@sC$t>Z~yqKthoUJDz>sdX7Bxh{@|E2 zA(JJ2Bl8NQe9f|rdUVKo^drnbKLq$;N+kGcMOH^42V*kzoYlexjR&A)h8u9zF|ycP zk}v`RHXdCOn>q+1N$$@pxF7?`PGlvKoueMri$$s5Ej1^dAi#HJ+zfm6?PF3r1uGW^ zs9w7N?2YZZAHP%$8^NVAUHh4>I(#l;)x|}b9Fm#A?N*aXZ`5h0q+12c>0&q84sT`z zGWdQzD{z=h@E^6`lmYj&g?J;tvi7nu_b>DOv9P3qyrKavr8h86jsVD7d2gt+J_D`g zLp6)^Iu*KBD}YMiu!Hc8mVE+OhOu%i_$uHuIsG|U+Z8fxg$?W^jiRj9@( zgIE?1BRz@%Ey9!m+p z>Ld&r-~ueXtf5PdUyUDu2P(c@?3g*l=c5K~S7i+tJR@cuFB>f^IKBYGr4PGd+va4vHu2IZzB*yc(Xp*M%@~v1|W5Cj) zzbM-KRz2#XL9l)kT`k4ct`V_hR)uv^(d!)&Hc*@8m+W=K4c!@~5nM2SsbwiGBgA=* zp4R;QJscS!vj^}xAaceT8s+2LxuwLRfX*zL)7!>!pgNcsK4wNPD z6r2cI&@;)K;p`j{frsK^P6^aPexM7w_7Z0=WNsPtFuA^l3cSQE5Mg+EkwG6~s21BQ zCj=-BR$lZHX@Y!HL*%xzYFDSL4G!wD>*4NGh-O&BVL@D%j3Eqw8+68!63<1;-2 
z?PMkNDC9)8JH?I34`!p!To$k7S2!0K_>fD^^QM7I{i_6_@Th7<67&MYlnRHOQK-0> zp+|hnCuc#^6)&y%$LQhBF_Y7;MKSr;^Sbr|R&64~^%Bk6J}+1D`BzQ1NMX`B9mbx; znspA8N?pPRhX?~M#5y%jhN$vN9EH?fz(qoYbkq)%D z=I#|7R$IGv+NAG8Vi`V25)maE0kl}Pc-Y^*4*l*kYGDtbRUToaoDP!kIDUKtxaj2- zFHhZ;gZkfBYomM3Ec#CQTPY-)oYQ|_VH5;wzN=s{pt)k3bo9>GHb^auq*RYmuS3@` zAoD0QB;sc5hp%>iMzmCNb5bMaHzH};m>q>0*u3V&S Self { + Self { config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub client: DAClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for AvailWiringLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "avail_client_layer" + } + + async fn wire(self, _input: Self::Input) -> Result { + let client: Box = + Box::new(AvailClient::new(self.config).await?); + + Ok(Self::Output { + client: DAClientResource(client), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/da_clients/mod.rs b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs new file mode 100644 index 000000000000..48311ce4c3f2 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/da_clients/mod.rs @@ -0,0 +1,3 @@ +pub mod avail; +pub mod no_da; +pub mod object_store; diff --git a/core/lib/default_da_clients/src/no_da/wiring_layer.rs b/core/node/node_framework/src/implementations/layers/da_clients/no_da.rs similarity index 90% rename from core/lib/default_da_clients/src/no_da/wiring_layer.rs rename to core/node/node_framework/src/implementations/layers/da_clients/no_da.rs index 71a2ee7ce582..5a81ce9b3400 100644 --- a/core/lib/default_da_clients/src/no_da/wiring_layer.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/no_da.rs @@ -1,18 +1,19 @@ use std::fmt::Debug; use zksync_da_client::DataAvailabilityClient; -use zksync_node_framework::{ +use zksync_da_clients::no_da::NoDAClient; + +use crate::{ implementations::resources::da_client::DAClientResource, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; -use crate::no_da::client::NoDAClient; - #[derive(Debug, Default)] pub struct NoDAClientWiringLayer; #[derive(Debug, IntoContext)] +#[context(crate = crate)] pub struct Output { pub client: DAClientResource, } diff --git a/core/lib/default_da_clients/src/object_store/wiring_layer.rs b/core/node/node_framework/src/implementations/layers/da_clients/object_store.rs similarity index 91% rename from core/lib/default_da_clients/src/object_store/wiring_layer.rs rename to core/node/node_framework/src/implementations/layers/da_clients/object_store.rs index 6fc84fb707b7..3fb720696da5 100644 --- a/core/lib/default_da_clients/src/object_store/wiring_layer.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/object_store.rs @@ -1,13 +1,13 @@ use zksync_config::ObjectStoreConfig; use zksync_da_client::DataAvailabilityClient; -use zksync_node_framework::{ +use zksync_da_clients::object_store::ObjectStoreDAClient; + +use crate::{ implementations::resources::da_client::DAClientResource, wiring_layer::{WiringError, WiringLayer}, IntoContext, }; -use crate::object_store::client::ObjectStoreDAClient; - #[derive(Debug)] pub struct ObjectStorageClientWiringLayer { config: ObjectStoreConfig, @@ -20,6 +20,7 @@ impl ObjectStorageClientWiringLayer { } #[derive(Debug, IntoContext)] +#[context(crate = crate)] pub struct Output { pub client: DAClientResource, } diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 
6f3500a82cb9..75828da19023 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -6,6 +6,7 @@ pub mod commitment_generator; pub mod consensus; pub mod consistency_checker; pub mod contract_verification_api; +pub mod da_clients; pub mod da_dispatcher; pub mod eth_sender; pub mod eth_watch; diff --git a/deny.toml b/deny.toml index b840ec5176e8..83a8709a69c6 100644 --- a/deny.toml +++ b/deny.toml @@ -31,6 +31,8 @@ allow = [ "BSD-3-Clause", "Zlib", "OpenSSL", + "Apache-2.0 WITH LLVM-exception", + "0BSD", ] confidence-threshold = 0.8 From 93b4e08257802d11108870d867dd59fa35e52733 Mon Sep 17 00:00:00 2001 From: Danil Date: Thu, 12 Sep 2024 12:16:21 +0200 Subject: [PATCH 060/116] feat(zk_toolbox): deploy legacy bridge (#2837) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Support legacy bridges in zk toolbox ## Why ❔ We still have some legacy bridges in production, so we need to be able to test them ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil --- .github/workflows/ci-core-reusable.yml | 36 +++++---- contracts | 2 +- prover/crates/lib/keystore/src/utils.rs | 1 + zk_toolbox/crates/common/src/server.rs | 8 +- zk_toolbox/crates/config/src/chain.rs | 4 + zk_toolbox/crates/config/src/ecosystem.rs | 1 + .../crates/config/src/forge_interface/mod.rs | 1 + .../src/forge_interface/script_params.rs | 6 ++ .../setup_legacy_bridge/mod.rs | 20 +++++ .../src/commands/args/run_server.rs | 4 +- .../src/commands/chain/args/create.rs | 4 + .../zk_inception/src/commands/chain/create.rs | 18 ++++- .../src/commands/chain/deploy_l2_contracts.rs | 14 +++- .../src/commands/chain/genesis.rs | 2 +- .../zk_inception/src/commands/chain/init.rs | 12 +++ .../zk_inception/src/commands/chain/mod.rs | 1 + .../src/commands/chain/setup_legacy_bridge.rs | 73 +++++++++++++++++++ .../zk_inception/src/commands/server.rs | 6 +- .../crates/zk_inception/src/messages.rs | 1 + .../src/commands/test/loadtest.rs | 52 +++++++++++++ .../zk_supervisor/src/commands/test/mod.rs | 10 ++- .../src/commands/test/recovery.rs | 2 +- .../crates/zk_supervisor/src/messages.rs | 2 + 23 files changed, 251 insertions(+), 29 deletions(-) create mode 100644 zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 18cbc2c2afa3..a88a8fe3944e 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -71,7 +71,7 @@ jobs: strategy: fail-fast: false matrix: - vm_mode: [ "old", "new" ] + vm_mode: [ "OLD", "NEW" ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - name: Loadtest configuration run: | - echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'new' && 21000 || 16000 }} >> .env + echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env echo ACCOUNTS_AMOUNT="100" >> .env echo MAX_INFLIGHT_TXS="10" >> .env echo SYNC_API_REQUESTS_LIMIT="15" >> .env echo FAIL_FAST=true >> .env
echo IN_DOCKER=1 >> .env - echo DATABASE_MERKLE_TREE_MODE=lightweight >> .env - name: Start services run: | @@ -107,23 +106,34 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk - ci_run zk init --local-legacy-bridge-testing + ci_run ./bin/zkt + ci_run zk_inception chain create \ + --chain-name legacy \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites \ + --legacy-bridge + + ci_run zk_inception ecosystem init --dev --verbose + ci_run zk_supervisor contracts --test-contracts # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE=${{ matrix.vm_mode }} \ - CHAIN_MEMPOOL_DELAY_INTERVAL=50 \ - PASSED_ENV_VARS="EXPERIMENTAL_VM_STATE_KEEPER_FAST_VM_MODE,CHAIN_MEMPOOL_DELAY_INTERVAL" \ - ci_run zk server --uring --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & + ci_run sed -i -e 's/mode: FULL/mode: LIGHTWEIGHT/' chains/legacy/configs/general.yaml + ci_run sed -i -e 's/state_keeper_fast_vm_mode:.*/state_keeper_fast_vm_mode: ${{ matrix.vm_mode }}/' chains/legacy/configs/general.yaml + ci_run sed -i -e 's/delay_interval:.*/delay_interval: 50/' chains/legacy/configs/general.yaml + ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 - - name: Deploy legacy era contracts - run: ci_run zk contract setup-legacy-bridge-era - - name: Perform loadtest - run: ci_run zk run loadtest + run: ci_run zk_supervisor t loadtest -v --chain=legacy - name: Show server.log logs if: always() diff --git a/contracts b/contracts index d3687694f71d..73b20c4b972f 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit d3687694f71d83fa286b9c186b4c3ea173028f83 +Subproject commit 73b20c4b972f575613b4054d238332f93f2685cc diff --git a/prover/crates/lib/keystore/src/utils.rs b/prover/crates/lib/keystore/src/utils.rs index d9bb3b47dbb0..10504292d64f 100644 --- a/prover/crates/lib/keystore/src/utils.rs +++ b/prover/crates/lib/keystore/src/utils.rs @@ -115,6 +115,7 @@ pub fn calculate_snark_vk_hash(keystore: &Keystore) -> anyhow::Result { #[cfg(test)] mod tests { use std::str::FromStr; + use zksync_utils::env::Workspace; use super::*; diff --git a/zk_toolbox/crates/common/src/server.rs b/zk_toolbox/crates/common/src/server.rs index c65c8d4c13e2..40da1cf80325 100644 --- a/zk_toolbox/crates/common/src/server.rs +++ b/zk_toolbox/crates/common/src/server.rs @@ -9,6 +9,7 @@ use crate::cmd::Cmd; pub struct Server { components: Option>, code_path: PathBuf, + uring: bool, } /// Possible server modes. @@ -20,10 +21,11 @@ pub enum ServerMode { impl Server { /// Creates a new instance of the server. 
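// (The new `uring` flag below is forwarded to cargo as `--features=rocksdb/io-uring`
// via `bool::then_some`, so the feature is only compiled in when requested;
// `{uring...}` in the xshell command splats the resulting `Option<&str>` as zero
// or one argument.)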
- pub fn new(components: Option>, code_path: PathBuf) -> Self { + pub fn new(components: Option>, code_path: PathBuf, uring: bool) -> Self { Self { components, code_path, + uring, } } @@ -52,10 +54,12 @@ impl Server { additional_args.push("--genesis".to_string()); } + let uring = self.uring.then_some("--features=rocksdb/io-uring"); + let mut cmd = Cmd::new( cmd!( shell, - "cargo run --release --bin zksync_server -- + "cargo run --release --bin zksync_server {uring...} -- --genesis-path {genesis_path} --wallets-path {wallets_path} --config-path {general_path} diff --git a/zk_toolbox/crates/config/src/chain.rs b/zk_toolbox/crates/config/src/chain.rs index 54ed1f7d3f35..affc8ccc770c 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zk_toolbox/crates/config/src/chain.rs @@ -38,6 +38,8 @@ pub struct ChainConfigInternal { pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, pub base_token: BaseToken, pub wallet_creation: WalletCreation, + #[serde(skip_serializing_if = "Option::is_none")] + pub legacy_bridge: Option, } /// Chain configuration file. This file is created in the chain @@ -58,6 +60,7 @@ pub struct ChainConfig { pub base_token: BaseToken, pub wallet_creation: WalletCreation, pub shell: OnceCell, + pub legacy_bridge: Option, } impl Serialize for ChainConfig { @@ -153,6 +156,7 @@ impl ChainConfig { l1_batch_commit_data_generator_mode: self.l1_batch_commit_data_generator_mode, base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, + legacy_bridge: self.legacy_bridge, } } } diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index a0412fbc4733..7ff65d4612df 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -173,6 +173,7 @@ impl EcosystemConfig { artifacts: config .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), + legacy_bridge: config.legacy_bridge, }) } diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zk_toolbox/crates/config/src/forge_interface/mod.rs index ea3d49c67ecb..c7033c45ed22 100644 --- a/zk_toolbox/crates/config/src/forge_interface/mod.rs +++ b/zk_toolbox/crates/config/src/forge_interface/mod.rs @@ -4,3 +4,4 @@ pub mod deploy_l2_contracts; pub mod paymaster; pub mod register_chain; pub mod script_params; +pub mod setup_legacy_bridge; diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zk_toolbox/crates/config/src/forge_interface/script_params.rs index fb16aa97e6a8..e7e21ad132b8 100644 --- a/zk_toolbox/crates/config/src/forge_interface/script_params.rs +++ b/zk_toolbox/crates/config/src/forge_interface/script_params.rs @@ -61,3 +61,9 @@ pub const ACCEPT_GOVERNANCE_SCRIPT_PARAMS: ForgeScriptParams = ForgeScriptParams output: "script-out/output-accept-admin.toml", script_path: "deploy-scripts/AcceptAdmin.s.sol", }; + +pub const SETUP_LEGACY_BRIDGE: ForgeScriptParams = ForgeScriptParams { + input: "script-config/setup-legacy-bridge.toml", + output: "script-out/setup-legacy-bridge.toml", + script_path: "deploy-scripts/dev/SetupLegacyBridge.s.sol", +}; diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs new file mode 100644 index 000000000000..e8189c521fb3 --- /dev/null +++ b/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -0,0 +1,20 @@ +use serde::{Deserialize, Serialize}; +use zksync_basic_types::{Address, L2ChainId, H256}; + +use 
crate::traits::ZkToolboxConfig; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct SetupLegacyBridgeInput { + pub bridgehub: Address, + pub diamond_proxy: Address, + pub shared_bridge_proxy: Address, + pub transparent_proxy_admin: Address, + pub erc20bridge_proxy: Address, + pub token_weth_address: Address, + pub chain_id: L2ChainId, + pub l2shared_bridge_address: Address, + pub create2factory_salt: H256, + pub create2factory_addr: Address, +} + +impl ZkToolboxConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs index 1e373319ec73..ebe407d4822d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use crate::messages::{ MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_BUILD_HELP, MSG_SERVER_COMPONENTS_HELP, - MSG_SERVER_GENESIS_HELP, + MSG_SERVER_GENESIS_HELP, MSG_SERVER_URING_HELP, }; #[derive(Debug, Serialize, Deserialize, Parser)] @@ -17,4 +17,6 @@ pub struct RunServerArgs { additional_args: Vec, #[clap(long, help = MSG_SERVER_BUILD_HELP)] pub build: bool, + #[clap(help=MSG_SERVER_URING_HELP, long, default_missing_value = "true", num_args = 0..=1)] + pub uring: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs index 65f809287890..3ea15d10f8be 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs @@ -65,6 +65,8 @@ pub struct ChainCreateArgs { base_token_price_denominator: Option, #[clap(long, help = MSG_SET_AS_DEFAULT_HELP, default_missing_value = "true", num_args = 0..=1)] pub(crate) set_as_default: Option, + #[clap(long, default_value = "false")] + pub(crate) legacy_bridge: bool, } impl ChainCreateArgs { @@ -224,6 +226,7 @@ impl ChainCreateArgs { wallet_path, base_token, set_as_default, + legacy_bridge: self.legacy_bridge, }) } } @@ -238,6 +241,7 @@ pub struct ChainCreateArgsFinal { pub wallet_path: Option, pub base_token: BaseToken, pub set_as_default: bool, + pub legacy_bridge: bool, } #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs index abdea482db4c..48a320ec27e0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs @@ -59,15 +59,24 @@ pub(crate) fn create_chain_inner( ecosystem_config: &EcosystemConfig, shell: &Shell, ) -> anyhow::Result<()> { + if args.legacy_bridge { + logger::warn("WARNING!!! 
You are creating a chain with legacy bridge, use it only for testing compatibility") + } let default_chain_name = args.chain_name.clone(); let chain_path = ecosystem_config.chains.join(&default_chain_name); let chain_configs_path = create_local_configs_dir(shell, &chain_path)?; - let chain_id = ecosystem_config.list_of_chains().len() as u32; + let (chain_id, legacy_bridge) = if args.legacy_bridge { + // Legacy bridge is distinguished by using the same chain id as ecosystem + (ecosystem_config.era_chain_id, Some(true)) + } else { + (L2ChainId::from(args.chain_id), None) + }; + let internal_id = ecosystem_config.list_of_chains().len() as u32; let chain_config = ChainConfig { - id: chain_id, + id: internal_id, name: default_chain_name.clone(), - chain_id: L2ChainId::from(args.chain_id), + chain_id, prover_version: args.prover_version, l1_network: ecosystem_config.l1_network, link_to_code: ecosystem_config.link_to_code.clone(), @@ -79,13 +88,14 @@ pub(crate) fn create_chain_inner( base_token: args.base_token, wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), + legacy_bridge, }; create_wallets( shell, &chain_config.configs, &ecosystem_config.link_to_code, - chain_id, + internal_id, args.wallet_creation, args.wallet_path, )?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 3625abfb15a9..7545ec2ec26f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -125,12 +125,17 @@ pub async fn initialize_bridges( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { + let signature = if let Some(true) = chain_config.legacy_bridge { + Some("runDeployLegacySharedBridge") + } else { + Some("runDeploySharedBridge") + }; build_and_deploy( shell, chain_config, ecosystem_config, forge_args, - Some("runDeploySharedBridge"), + signature, |shell, out| { contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?) 
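// (Which forge entrypoint runs here is decided by `signature` above; a sketch of
// the rule, with a helper name that is ours rather than the codebase's:
//     fn entrypoint(legacy_bridge: Option<bool>) -> &'static str {
//         if legacy_bridge == Some(true) { "runDeployLegacySharedBridge" } else { "runDeploySharedBridge" }
//     }
// `deploy_l2_contracts` below applies the same pattern, selecting
// `runWithLegacyBridge` instead of the default entrypoint.)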
}, @@ -185,12 +190,17 @@ pub async fn deploy_l2_contracts( contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, ) -> anyhow::Result<()> { + let signature = if let Some(true) = chain_config.legacy_bridge { + Some("runWithLegacyBridge") + } else { + None + }; build_and_deploy( shell, chain_config, ecosystem_config, forge_args, - None, + signature, |shell, out| { contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?; contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index 0eb40d630ae9..bfa3f94916b8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -168,7 +168,7 @@ async fn initialize_databases( } fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = Server::new(None, chain_config.link_to_code.clone()); + let server = Server::new(None, chain_config.link_to_code.clone(), false); server .run( shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index a5f57981d583..2b7dbf73f681 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -27,6 +27,7 @@ use crate::{ deploy_l2_contracts, deploy_paymaster, genesis::genesis, set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, portal::update_portal_config, }, @@ -142,6 +143,17 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + if let Some(true) = chain_config.legacy_bridge { + setup_legacy_bridge( + shell, + chain_config, + ecosystem_config, + &contracts_config, + init_args.forge_args.clone(), + ) + .await?; + } + if init_args.deploy_paymaster { deploy_paymaster::deploy_paymaster( shell, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index afc92d2288bf..61a164c16553 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -16,6 +16,7 @@ pub mod deploy_paymaster; pub mod genesis; pub(crate) mod init; mod set_token_multiplier_setter; +mod setup_legacy_bridge; #[derive(Subcommand, Debug)] pub enum ChainCommands { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs new file mode 100644 index 000000000000..925014fe4e61 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs @@ -0,0 +1,73 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + script_params::SETUP_LEGACY_BRIDGE, setup_legacy_bridge::SetupLegacyBridgeInput, + }, + traits::SaveConfig, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{MSG_DEPLOYING_PAYMASTER, MSG_L1_SECRETS_MUST_BE_PRESENTED}, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn setup_legacy_bridge( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &ContractsConfig, + forge_args: ForgeScriptArgs, 
+) -> anyhow::Result<()> { + let input = SetupLegacyBridgeInput { + bridgehub: contracts_config.ecosystem_contracts.bridgehub_proxy_addr, + diamond_proxy: contracts_config.l1.diamond_proxy_addr, + shared_bridge_proxy: contracts_config.bridges.shared.l1_address, + transparent_proxy_admin: contracts_config + .ecosystem_contracts + .transparent_proxy_admin_addr, + erc20bridge_proxy: contracts_config.bridges.erc20.l1_address, + token_weth_address: Default::default(), + chain_id: chain_config.chain_id, + l2shared_bridge_address: contracts_config + .bridges + .shared + .l2_address + .expect("Not fully initialized"), + create2factory_salt: contracts_config.create2_factory_salt, + create2factory_addr: contracts_config.create2_factory_addr, + }; + let foundry_contracts_path = chain_config.path_to_foundry(); + input.save(shell, SETUP_LEGACY_BRIDGE.input(&chain_config.link_to_code))?; + let secrets = chain_config.get_secrets_config()?; + + let mut forge = Forge::new(&foundry_contracts_path) + .script(&SETUP_LEGACY_BRIDGE.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url( + secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? + .l1_rpc_url + .expose_str() + .to_string(), + ) + .with_broadcast(); + + forge = fill_forge_private_key( + forge, + ecosystem_config.get_wallets()?.governor_private_key(), + )?; + + let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); + check_the_balance(&forge).await?; + forge.run(shell)?; + spinner.finish(); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs index b5a09ed04370..f96bc3aeebc9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs @@ -35,7 +35,11 @@ fn run_server( chain_config: &ChainConfig, shell: &Shell, ) -> anyhow::Result<()> { - let server = Server::new(args.components.clone(), chain_config.link_to_code.clone()); + let server = Server::new( + args.components.clone(), + chain_config.link_to_code.clone(), + args.uring, + ); if args.build { server.build(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 827aa03d7ba8..6fa1e293927b 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -224,6 +224,7 @@ pub(super) const MSG_SERVER_GENESIS_HELP: &str = "Run server in genesis mode"; pub(super) const MSG_SERVER_ADDITIONAL_ARGS_HELP: &str = "Additional arguments that can be passed through the CLI"; pub(super) const MSG_SERVER_BUILD_HELP: &str = "Build server but don't run it"; +pub(super) const MSG_SERVER_URING_HELP: &str = "Enables uring support for RocksDB"; /// Accept ownership related messages pub(super) const MSG_ACCEPTING_GOVERNANCE_SPINNER: &str = "Accepting governance..."; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs new file mode 100644 index 000000000000..5a2a87871b58 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs @@ -0,0 +1,52 @@ +use anyhow::Context; +use common::{cmd::Cmd, config::global_config, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; + +pub fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + + let chain_config = ecosystem_config + .load_chain(global_config().chain_name.clone()) + 
.context(MSG_CHAIN_NOT_FOUND_ERR)?; + + let general_api = chain_config + .get_general_config()? + .api_config + .context("API config is not found")?; + + let mut command = cmd!(shell, "cargo run --release --bin loadnext") + .env( + "L2_CHAIN_ID", + chain_config + .get_genesis_config()? + .l2_chain_id + .as_u64() + .to_string(), + ) + .env( + "MAIN_TOKEN", + format!( + "{:?}", + ecosystem_config + .get_erc20_tokens() + .first() + .context("NO Erc20 tokens were deployed")? + .address + ), + ) + .env("L2_RPC_ADDRESS", general_api.web3_json_rpc.http_url) + .env("L2_WS_RPC_ADDRESS", general_api.web3_json_rpc.ws_url); + + if global_config().verbose { + command = command.env("RUST_LOG", "loadnext=info") + } + + Cmd::new(command).with_force_run().run()?; + + logger::outro("Loadtest success"); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index a536302afc15..712e2f75eefd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -6,15 +6,16 @@ use clap::Subcommand; use xshell::Shell; use crate::messages::{ - MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_PROVER_TEST_ABOUT, - MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_TEST_WALLETS_INFO, - MSG_UPGRADE_TEST_ABOUT, + MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT, + MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, + MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT, }; mod args; mod build; mod integration; mod l1_contracts; +mod loadtest; mod prover; mod recovery; mod revert; @@ -43,6 +44,8 @@ pub enum TestCommands { Prover, #[clap(about = MSG_TEST_WALLETS_INFO)] Wallet, + #[clap(about = MSG_LOADTEST_ABOUT)] + Loadtest, } pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> { @@ -56,5 +59,6 @@ pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> { TestCommands::L1Contracts => l1_contracts::run(shell), TestCommands::Prover => prover::run(shell), TestCommands::Wallet => wallet::run(shell), + TestCommands::Loadtest => loadtest::run(shell), } } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs index 030d28966031..c69a9826d56c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs @@ -21,7 +21,7 @@ pub async fn run(shell: &Shell, args: RecoveryArgs) -> anyhow::Result<()> { shell.change_dir(ecosystem_config.link_to_code.join(RECOVERY_TESTS_PATH)); logger::info(MSG_RECOVERY_TEST_RUN_INFO); - Server::new(None, ecosystem_config.link_to_code.clone()).build(shell)?; + Server::new(None, ecosystem_config.link_to_code.clone(), false).build(shell)?; if !args.no_deps { install_and_build_dependencies(shell, &ecosystem_config)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 89cf8c1d9b60..311a6e11c326 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -200,3 +200,5 @@ pub(super) const MSG_RUNNING_CONTRACTS_FMT_SPINNER: &str = "Running prettier for pub(super) const MSG_TEST_WALLETS_INFO: &str = "Print test wallets information"; pub(super) const MSG_DESERIALIZE_TEST_WALLETS_ERR: &str = "Impossible to deserialize test 
wallets"; pub(super) const MSG_WALLETS_TEST_SUCCESS: &str = "Wallets test success"; + +pub(super) const MSG_LOADTEST_ABOUT: &str = "Run loadtest"; From 4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?= Date: Thu, 12 Sep 2024 07:39:58 -0300 Subject: [PATCH 061/116] feat(zk_toolbox): Add external_node consensus support (#2821) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add external_node consensus support --------- Signed-off-by: Danil Co-authored-by: Danil --- .github/workflows/ci-zk-toolbox-reusable.yml | 53 ++++ core/bin/external_node/src/config/mod.rs | 8 +- core/bin/external_node/src/node_builder.rs | 9 +- core/lib/config/src/configs/consensus.rs | 3 +- zk_toolbox/Cargo.lock | 290 +++++++++++++++--- zk_toolbox/Cargo.toml | 3 + zk_toolbox/crates/common/src/external_node.rs | 2 + .../crates/config/src/consensus_config.rs | 18 ++ .../crates/config/src/consensus_secrets.rs | 14 + zk_toolbox/crates/config/src/consts.rs | 4 + zk_toolbox/crates/config/src/general.rs | 33 +- zk_toolbox/crates/config/src/lib.rs | 2 + zk_toolbox/crates/zk_inception/Cargo.toml | 3 + .../zk_inception/src/commands/chain/init.rs | 14 +- .../src/commands/external_node/args/run.rs | 6 +- .../commands/external_node/prepare_configs.rs | 60 +++- .../src/commands/external_node/run.rs | 3 +- zk_toolbox/crates/zk_inception/src/consts.rs | 23 ++ .../crates/zk_inception/src/defaults.rs | 1 + .../crates/zk_inception/src/external_node.rs | 18 +- .../crates/zk_inception/src/messages.rs | 6 + .../zk_inception/src/utils/consensus.rs | 124 ++++++++ .../crates/zk_inception/src/utils/mod.rs | 1 + .../crates/zk_inception/src/utils/rocks_db.rs | 8 +- 24 files changed, 640 insertions(+), 66 deletions(-) create mode 100644 zk_toolbox/crates/config/src/consensus_config.rs create mode 100644 zk_toolbox/crates/config/src/consensus_secrets.rs create mode 100644 zk_toolbox/crates/zk_inception/src/utils/consensus.rs diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml index 78e1e485cafc..638f168de309 100644 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ b/.github/workflows/ci-zk-toolbox-reusable.yml @@ -132,6 +132,30 @@ jobs: --prover-db-name=zksync_prover_localhost_custom_token \ --port-offset 3000 \ --chain custom_token + + - name: Create and initialize Consensus chain + run: | + ci_run zk_inception chain create \ + --chain-name consensus \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address ${{ env.address }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_consensus \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_consensus \ + --port-offset 4000 \ + --chain consensus - name: Build test dependencies run: | @@ -142,6 +166,10 @@ jobs: ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & ci_run zk_inception server --ignore-prerequisites 
--chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zk_inception server --ignore-prerequisites --chain consensus \ + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & + ci_run sleep 5 - name: Run integration tests @@ -155,9 +183,13 @@ jobs: ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & PID3=$! + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & + PID4=$! + wait $PID1 wait $PID2 wait $PID3 + wait $PID4 - name: Init external nodes run: | @@ -173,6 +205,10 @@ jobs: --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus + ci_run zk_inception external-node init --ignore-prerequisites --chain consensus + - name: Run recovery tests (from snapshot) run: | @@ -185,9 +221,13 @@ jobs: ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//custom_token.log & PID3=$! + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//consensus.log & + PID4=$! + wait $PID1 wait $PID2 wait $PID3 + wait $PID4 - name: Run recovery tests (from genesis) run: | @@ -200,15 +240,20 @@ jobs: ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & + PID4=$! + wait $PID1 wait $PID2 wait $PID3 + wait $PID4 - name: Run external node server run: | ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & ci_run sleep 5 - name: Run integration tests en @@ -222,9 +267,13 @@ jobs: ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & PID3=$! + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & + PID4=$! 
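# All four chains (era, validium, custom_token, consensus) are exercised in
# parallel: each run is backgrounded with `&`, its PID captured via `$!`, and
# the step then waits on every PID so that a failure in any chain fails the step.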
+ wait $PID1 wait $PID2 wait $PID3 + wait $PID4 - name: Run revert tests run: | @@ -239,10 +288,14 @@ jobs: ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & PID3=$! + + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & + PID4=$! wait $PID1 wait $PID2 wait $PID3 + wait $PID4 # Upgrade tests should run last, because as soon as they diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index cd4e845b8f3e..f8241deae26c 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -1217,6 +1217,7 @@ pub(crate) struct ExternalNodeConfig { pub observability: ObservabilityENConfig, pub experimental: ExperimentalENConfig, pub consensus: Option, + pub consensus_secrets: Option, pub api_component: ApiComponentConfig, pub tree_component: TreeComponentConfig, pub remote: R, @@ -1240,6 +1241,8 @@ impl ExternalNodeConfig<()> { tree_component: envy::prefixed("EN_TREE_") .from_env::() .context("could not load external node config (tree component params)")?, + consensus_secrets: read_consensus_secrets() + .context("config::read_consensus_secrets()")?, remote: (), }) } @@ -1262,7 +1265,7 @@ impl ExternalNodeConfig<()> { .map(read_yaml_repr::) .transpose() .context("failed decoding consensus YAML config")?; - + let consensus_secrets = secrets_config.consensus.clone(); let required = RequiredENConfig::from_configs( &general_config, &external_node_config, @@ -1298,6 +1301,7 @@ impl ExternalNodeConfig<()> { consensus, api_component, tree_component, + consensus_secrets, remote: (), }) } @@ -1332,6 +1336,7 @@ impl ExternalNodeConfig<()> { consensus: self.consensus, tree_component: self.tree_component, api_component: self.api_component, + consensus_secrets: self.consensus_secrets, remote, }) } @@ -1348,6 +1353,7 @@ impl ExternalNodeConfig { observability: ObservabilityENConfig::default(), experimental: ExperimentalENConfig::mock(), consensus: None, + consensus_secrets: None, api_component: ApiComponentConfig { tree_api_remote_url: None, }, diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 7b94ca7a0c2a..98e286c253a2 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -56,11 +56,7 @@ use zksync_node_framework::{ }; use zksync_state::RocksdbStorageOptions; -use crate::{ - config::{self, ExternalNodeConfig}, - metrics::framework::ExternalNodeMetricsLayer, - Component, -}; +use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; /// Builder for the external node. 
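// (With this patch the external node loads consensus secrets once, while building
// `ExternalNodeConfig`: from env vars in the env-based path, or from the secrets
// YAML in the file-based path. `add_consensus_layer` below then clones
// `config.consensus_secrets` instead of re-reading them from disk.)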
#[derive(Debug)] @@ -240,8 +236,7 @@ impl ExternalNodeBuilder { fn add_consensus_layer(mut self) -> anyhow::Result { let config = self.config.consensus.clone(); - let secrets = - config::read_consensus_secrets().context("config::read_consensus_secrets()")?; + let secrets = self.config.consensus_secrets.clone(); let layer = ExternalNodeConsensusLayer { build_version: crate::metadata::SERVER_VERSION .parse() diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index e5e01f880feb..759e13128338 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -1,6 +1,7 @@ use std::collections::{BTreeMap, BTreeSet}; -use secrecy::{ExposeSecret as _, Secret}; +use secrecy::ExposeSecret as _; +pub use secrecy::Secret; use zksync_basic_types::{ethabi, L2ChainId}; use zksync_concurrency::{limiter, time}; diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 7c53e2747daf..eb16477382c2 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -409,6 +409,18 @@ dependencies = [ "generic-array", ] +[[package]] +name = "blst" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4378725facc195f1a538864863f6de233b500a8862747e7f165078a419d5e874" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "bs58" version = "0.5.1" @@ -631,7 +643,7 @@ dependencies = [ "hmac", "once_cell", "pbkdf2 0.12.2", - "rand", + "rand 0.8.5", "sha2", "thiserror", ] @@ -709,7 +721,7 @@ dependencies = [ "clap", "common", "ethers", - "rand", + "rand 0.8.5", "serde", "serde_json", "strum", @@ -857,7 +869,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -869,7 +881,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core", + "rand_core 0.6.4", "subtle", "zeroize", ] @@ -903,6 +915,33 @@ dependencies = [ "cipher", ] +[[package]] +name = "curve25519-dalek" +version = "4.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" +dependencies = [ + "cfg-if", + "cpufeatures", + "curve25519-dalek-derive", + "digest", + "fiat-crypto", + "rustc_version", + "subtle", + "zeroize", +] + +[[package]] +name = "curve25519-dalek-derive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.68", +] + [[package]] name = "darling" version = "0.13.4" @@ -1120,6 +1159,31 @@ dependencies = [ "spki 0.7.3", ] +[[package]] +name = "ed25519" +version = "2.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" +dependencies = [ + "pkcs8 0.10.2", + "signature 2.2.0", +] + +[[package]] +name = "ed25519-dalek" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" +dependencies = [ + "curve25519-dalek", + "ed25519", + "rand_core 0.6.4", + "serde", + "sha2", + "subtle", + "zeroize", +] + [[package]] 
name = "either" version = "1.13.0" @@ -1143,7 +1207,7 @@ dependencies = [ "generic-array", "group 0.12.1", "pkcs8 0.9.0", - "rand_core", + "rand_core 0.6.4", "sec1 0.3.0", "subtle", "zeroize", @@ -1162,7 +1226,7 @@ dependencies = [ "generic-array", "group 0.13.0", "pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "sec1 0.7.3", "subtle", "zeroize", @@ -1212,7 +1276,7 @@ dependencies = [ "hex", "k256 0.13.3", "log", - "rand", + "rand 0.8.5", "rlp", "serde", "sha3", @@ -1267,7 +1331,7 @@ dependencies = [ "hex", "hmac", "pbkdf2 0.11.0", - "rand", + "rand 0.8.5", "scrypt", "serde", "serde_json", @@ -1430,7 +1494,7 @@ dependencies = [ "num_enum 0.7.2", "once_cell", "open-fastrlp", - "rand", + "rand 0.8.5", "rlp", "serde", "serde_json", @@ -1535,7 +1599,7 @@ dependencies = [ "elliptic-curve 0.13.8", "eth-keystore", "ethers-core", - "rand", + "rand 0.8.5", "sha2", "thiserror", "tracing", @@ -1606,7 +1670,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -1616,10 +1680,44 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core", + "rand_core 0.6.4", "subtle", ] +[[package]] +name = "ff_ce" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" +dependencies = [ + "byteorder", + "ff_derive_ce", + "hex", + "rand 0.4.6", + "serde", +] + +[[package]] +name = "ff_derive_ce" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" +dependencies = [ + "num-bigint", + "num-integer", + "num-traits", + "proc-macro2", + "quote", + "serde", + "syn 1.0.109", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + [[package]] name = "findshlibs" version = "0.10.2" @@ -1639,7 +1737,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand", + "rand 0.8.5", "rustc-hex", "static_assertions", ] @@ -1711,6 +1809,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + [[package]] name = "funty" version = "2.0.0" @@ -1899,7 +2003,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -1910,7 +2014,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -2740,7 +2844,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ 
-2823,7 +2927,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand", + "rand 0.8.5", "smallvec", "zeroize", ] @@ -3119,7 +3223,7 @@ dependencies = [ "once_cell", "opentelemetry", "percent-encoding", - "rand", + "rand 0.8.5", "serde_json", "thiserror", "tokio", @@ -3158,6 +3262,19 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "pairing_ce" +version = "0.28.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" +dependencies = [ + "byteorder", + "cfg-if", + "ff_ce", + "rand 0.4.6", + "serde", +] + [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -3220,7 +3337,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core", + "rand_core 0.6.4", "subtle", ] @@ -3319,7 +3436,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared 0.11.2", - "rand", + "rand 0.8.5", ] [[package]] @@ -3530,7 +3647,7 @@ dependencies = [ "bitflags 2.6.0", "lazy_static", "num-traits", - "rand", + "rand 0.8.5", "rand_chacha", "rand_xorshift", "regex-syntax 0.8.4", @@ -3680,6 +3797,19 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" +[[package]] +name = "rand" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" +dependencies = [ + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "rdrand", + "winapi", +] + [[package]] name = "rand" version = "0.8.5" @@ -3688,7 +3818,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -3698,9 +3828,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", ] +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + [[package]] name = "rand_core" version = "0.6.4" @@ -3716,7 +3861,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -3739,6 +3884,15 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -3995,7 +4149,7 @@ dependencies = [ "num-traits", "pkcs1", 
"pkcs8 0.10.2", - "rand_core", + "rand_core 0.6.4", "signature 2.2.0", "spki 0.7.3", "subtle", @@ -4349,7 +4503,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", - "rand", + "rand 0.8.5", "sentry-types", "serde", "serde_json", @@ -4396,7 +4550,7 @@ checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", - "rand", + "rand 0.8.5", "serde", "serde_json", "thiserror", @@ -4586,7 +4740,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4596,7 +4750,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core", + "rand_core 0.6.4", ] [[package]] @@ -4840,7 +4994,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand", + "rand 0.8.5", "rsa", "serde", "sha1", @@ -4879,7 +5033,7 @@ dependencies = [ "md-5", "memchr", "once_cell", - "rand", + "rand 0.8.5", "serde", "serde_json", "sha2", @@ -5145,6 +5299,15 @@ dependencies = [ "once_cell", ] +[[package]] +name = "threadpool" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa" +dependencies = [ + "num_cpus", +] + [[package]] name = "time" version = "0.3.36" @@ -5397,7 +5560,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand", + "rand 0.8.5", "slab", "tokio", "tokio-util", @@ -5540,7 +5703,7 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand", + "rand 0.8.5", "rustls 0.21.12", "sha1", "thiserror", @@ -6277,6 +6440,7 @@ dependencies = [ "ethers", "human-panic", "lazy_static", + "secrecy", "serde", "serde_json", "serde_yaml", @@ -6290,6 +6454,8 @@ dependencies = [ "xshell", "zksync_basic_types", "zksync_config", + "zksync_consensus_crypto", + "zksync_consensus_roles", ] [[package]] @@ -6356,7 +6522,7 @@ dependencies = [ "anyhow", "once_cell", "pin-project", - "rand", + "rand 0.8.5", "sha3", "thiserror", "time", @@ -6371,7 +6537,7 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "rand", + "rand 0.8.5", "secrecy", "serde", "url", @@ -6381,6 +6547,52 @@ dependencies = [ "zksync_crypto_primitives", ] +[[package]] +name = "zksync_consensus_crypto" +version = "0.1.0-rc.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" +dependencies = [ + "anyhow", + "blst", + "ed25519-dalek", + "elliptic-curve 0.13.8", + "ff_ce", + "hex", + "k256 0.13.3", + "num-bigint", + "num-traits", + "pairing_ce", + "rand 0.4.6", + "rand 0.8.5", + "sha3", + "thiserror", + "tracing", + "zeroize", +] + +[[package]] +name = "zksync_consensus_roles" +version = "0.1.0-rc.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" +dependencies = [ + "anyhow", + "bit-vec", + "hex", + "num-bigint", + "prost 0.12.6", + "rand 0.8.5", + "serde", + "thiserror", + "tracing", + "zksync_concurrency", + "zksync_consensus_crypto", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", +] + [[package]] name = 
"zksync_consensus_utils" version = "0.1.0-rc.12" @@ -6388,7 +6600,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" dependencies = [ "anyhow", - "rand", + "rand 0.8.5", "thiserror", "zksync_concurrency", ] @@ -6413,7 +6625,7 @@ dependencies = [ "anyhow", "blake2", "hex", - "rand", + "rand 0.8.5", "secp256k1", "serde", "serde_json", @@ -6444,7 +6656,7 @@ dependencies = [ "prost 0.12.6", "prost-reflect", "quick-protobuf", - "rand", + "rand 0.8.5", "serde", "serde_json", "serde_yaml", @@ -6477,7 +6689,7 @@ dependencies = [ "anyhow", "hex", "prost 0.12.6", - "rand", + "rand 0.8.5", "secrecy", "serde_json", "serde_yaml", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index e1ad63136af1..126c44f0eaeb 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -30,6 +30,8 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } +zksync_consensus_roles = "=0.1.0-rc.12" +zksync_consensus_crypto = "=0.1.0-rc.12" zksync_protobuf = "=0.1.0-rc.12" # External dependencies @@ -59,3 +61,4 @@ toml = "0.8.12" url = { version = "2.5.0", features = ["serde"] } xshell = "0.2.6" clap-markdown = "0.1.4" +secrecy = "0.8.0" diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zk_toolbox/crates/common/src/external_node.rs index 09115f92d5fb..8a5cbc3cd14c 100644 --- a/zk_toolbox/crates/common/src/external_node.rs +++ b/zk_toolbox/crates/common/src/external_node.rs @@ -9,6 +9,7 @@ pub fn run( config_path: &str, secrets_path: &str, en_config_path: &str, + consensus_args: Vec, additional_args: Vec, ) -> anyhow::Result<()> { let _dir = shell.push_dir(code_path); @@ -22,6 +23,7 @@ pub fn run( --external-node-config-path {en_config_path} " ) + .args(consensus_args) .args(additional_args) .env_remove("RUSTUP_TOOLCHAIN"), ) diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zk_toolbox/crates/config/src/consensus_config.rs new file mode 100644 index 000000000000..0bb4750d1fc0 --- /dev/null +++ b/zk_toolbox/crates/config/src/consensus_config.rs @@ -0,0 +1,18 @@ +use zksync_config::configs::consensus::ConsensusConfig; +use zksync_protobuf_config::encode_yaml_repr; + +use crate::{ + traits::{FileConfigWithDefaultName, SaveConfig}, + CONSENSUS_CONFIG_FILE, +}; + +impl FileConfigWithDefaultName for ConsensusConfig { + const FILE_NAME: &'static str = CONSENSUS_CONFIG_FILE; +} + +impl SaveConfig for ConsensusConfig { + fn save(&self, shell: &xshell::Shell, path: impl AsRef) -> anyhow::Result<()> { + let bytes = encode_yaml_repr::(self)?; + Ok(shell.write_file(path.as_ref(), bytes)?) 
+ } +} diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zk_toolbox/crates/config/src/consensus_secrets.rs new file mode 100644 index 000000000000..0e5c4592d2fc --- /dev/null +++ b/zk_toolbox/crates/config/src/consensus_secrets.rs @@ -0,0 +1,14 @@ +use std::path::Path; + +use xshell::Shell; +use zksync_config::configs::consensus::ConsensusSecrets; +use zksync_protobuf_config::decode_yaml_repr; + +use crate::traits::ReadConfig; + +impl ReadConfig for ConsensusSecrets { + fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { + let path = shell.current_dir().join(path); + decode_yaml_repr::(&path, false) + } +} diff --git a/zk_toolbox/crates/config/src/consts.rs b/zk_toolbox/crates/config/src/consts.rs index 1e1c0998f00e..80b204cc6191 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zk_toolbox/crates/config/src/consts.rs @@ -11,6 +11,8 @@ pub const GENESIS_FILE: &str = "genesis.yaml"; // Name of external node specific config pub const EN_CONFIG_FILE: &str = "external_node.yaml"; +// Name of consensus config +pub const CONSENSUS_CONFIG_FILE: &str = "consensus_config.yaml"; pub(crate) const ERC20_CONFIGS_FILE: &str = "erc20.yaml"; /// Name of the initial deployments config file pub(crate) const INITIAL_DEPLOYMENT_FILE: &str = "initial_deployments.yaml"; @@ -60,6 +62,8 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; +/// Default port for consensus service +pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 41c2e4c33cfd..6498beb0f532 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -3,19 +3,21 @@ use std::path::{Path, PathBuf}; use anyhow::Context; use url::Url; use xshell::Shell; -use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; +use zksync_config::configs::{consensus::Host, object_store::ObjectStoreMode}; use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, + DEFAULT_CONSENSUS_PORT, }; pub struct RocksDbs { pub state_keeper: PathBuf, pub merkle_tree: PathBuf, pub protective_reads: PathBuf, + pub basic_witness_input_producer: PathBuf, } pub struct FileArtifacts { @@ -54,6 +56,15 @@ pub fn set_rocks_db_config(config: &mut GeneralConfig, rocks_dbs: RocksDbs) -> a .as_mut() .context("Protective reads config is not presented")? .db_path = rocks_dbs.protective_reads.to_str().unwrap().to_string(); + config + .basic_witness_input_producer_config + .as_mut() + .context("Basic witness input producer config is not presented")? 
+ .db_path = rocks_dbs + .basic_witness_input_producer + .to_str() + .unwrap() + .to_string(); Ok(()) } @@ -104,6 +115,11 @@ pub fn set_file_artifacts(config: &mut GeneralConfig, file_artifacts: FileArtifa pub fn ports_config(config: &GeneralConfig) -> Option { let api = config.api_config.as_ref()?; let contract_verifier = config.contract_verifier.as_ref()?; + let consensus_port = if let Some(consensus_config) = config.clone().consensus_config { + consensus_config.server_addr.port() + } else { + DEFAULT_CONSENSUS_PORT + }; Some(PortsConfig { web3_json_rpc_http_port: api.web3_json_rpc.http_port, @@ -112,6 +128,7 @@ pub fn ports_config(config: &GeneralConfig) -> Option { merkle_tree_port: api.merkle_tree.port, prometheus_listener_port: api.prometheus.listener_port, contract_verifier_port: contract_verifier.port, + consensus_port, }) } @@ -128,6 +145,10 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a .prometheus_config .as_mut() .context("Prometheus config is not presented")?; + if let Some(consensus) = config.consensus_config.as_mut() { + consensus.server_addr.set_port(ports_config.consensus_port); + update_port_in_host(&mut consensus.public_addr, ports_config.consensus_port)?; + } api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port; update_port_in_url( @@ -162,6 +183,13 @@ fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { Ok(()) } +fn update_port_in_host(host: &mut Host, port: u16) -> anyhow::Result<()> { + let url = Url::parse(&format!("http://{}", host.0))?; + let host_str = url.host_str().context("Failed to get host")?; + host.0 = format!("{host_str}:{port}"); + Ok(()) +} + impl FileConfigWithDefaultName for GeneralConfig { const FILE_NAME: &'static str = GENERAL_FILE; } @@ -173,6 +201,7 @@ pub struct PortsConfig { pub merkle_tree_port: u16, pub prometheus_listener_port: u16, pub contract_verifier_port: u16, + pub consensus_port: u16, } impl PortsConfig { @@ -183,6 +212,7 @@ impl PortsConfig { self.merkle_tree_port += offset; self.prometheus_listener_port += offset; self.contract_verifier_port += offset; + self.consensus_port += offset; } pub fn next_empty_ports_config(&self) -> PortsConfig { @@ -193,6 +223,7 @@ impl PortsConfig { merkle_tree_port: self.merkle_tree_port + 100, prometheus_listener_port: self.prometheus_listener_port + 100, contract_verifier_port: self.contract_verifier_port + 100, + consensus_port: self.consensus_port + 100, } } } diff --git a/zk_toolbox/crates/config/src/lib.rs b/zk_toolbox/crates/config/src/lib.rs index 3c7443f24490..1a7c5bf1d7e2 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zk_toolbox/crates/config/src/lib.rs @@ -25,6 +25,8 @@ mod secrets; mod wallet_creation; mod wallets; +pub mod consensus_config; +pub mod consensus_secrets; pub mod docker_compose; pub mod explorer; pub mod explorer_compose; diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 01d0697d6b6c..61983d59e6e9 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -34,3 +34,6 @@ zksync_config.workspace = true slugify-rs.workspace = true zksync_basic_types.workspace = true clap-markdown.workspace = true +zksync_consensus_roles.workspace = true +zksync_consensus_crypto.workspace = true +secrecy.workspace = true diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs index 2b7dbf73f681..734e5e54863b 100644 --- 
a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs @@ -35,11 +35,14 @@ use crate::{ messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR, - MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::forge::{check_the_balance, fill_forge_private_key}, + utils::{ + consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + forge::{check_the_balance, fill_forge_private_key}, + }, }; pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { @@ -70,6 +73,12 @@ pub async fn init( let mut general_config = chain_config.get_general_config()?; apply_port_offset(init_args.port_offset, &mut general_config)?; + let ports = ports_config(&general_config).context(MSG_PORTS_CONFIG_ERR)?; + + let consensus_keys = generate_consensus_keys(); + let consensus_config = + get_consensus_config(chain_config, ports, Some(consensus_keys.clone()), None)?; + general_config.consensus_config = Some(consensus_config); general_config.save_with_base_path(shell, &chain_config.configs)?; let mut genesis_config = chain_config.get_genesis_config()?; @@ -86,6 +95,7 @@ pub async fn init( let mut secrets = chain_config.get_secrets_config()?; set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); secrets.save_with_base_path(shell, &chain_config.configs)?; let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs index 1bc0c06728d7..cd6ff4c71534 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP}; +use crate::messages::{ + MSG_ENABLE_CONSENSUS_HELP, MSG_SERVER_ADDITIONAL_ARGS_HELP, MSG_SERVER_COMPONENTS_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RunExternalNodeArgs { @@ -9,6 +11,8 @@ pub struct RunExternalNodeArgs { pub reinit: bool, #[clap(long, help = MSG_SERVER_COMPONENTS_HELP)] pub components: Option>, + #[clap(long, help = MSG_ENABLE_CONSENSUS_HELP, default_missing_value = "true", num_args = 0..=1)] + pub enable_consensus: Option, #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false, help = MSG_SERVER_ADDITIONAL_ARGS_HELP)] pub additional_args: Vec, diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index 51101c228878..89e08418c6e5 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -1,4 +1,4 @@ -use std::{path::Path, str::FromStr}; +use std::{collections::BTreeMap, path::Path, str::FromStr}; use anyhow::Context; use common::{config::global_config, logger}; 
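The hunk below wires consensus into the EN configs: it derives the main node's public key from the secrets config for `gossip_static_outbound`, and generates a fresh `node_key` for the external node itself. A minimal sketch of that key handling, assuming the `zksync_consensus_roles` and `zksync_consensus_crypto` crates pinned in the workspace above (the helper is ours, not part of the patch):

    use zksync_consensus_crypto::TextFmt as _;
    use zksync_consensus_roles::node;

    /// Hypothetical helper: generate a fresh gossip identity for an external
    /// node, returning (public key, secret key) in the text encoding that the
    /// YAML configs store.
    fn generate_node_identity() -> (String, String) {
        let secret = node::SecretKey::generate(); // same call the hunk below makes
        (secret.public().encode(), secret.encode())
    }

    fn main() {
        let (public, secret) = generate_node_identity();
        println!("public: {public}");
        println!("secret: {} chars", secret.len());
    }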
@@ -8,14 +8,24 @@ use config::{ }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; -use zksync_config::configs::{DatabaseSecrets, L1Secrets}; +use zksync_config::configs::{ + consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + DatabaseSecrets, L1Secrets, +}; +use zksync_consensus_crypto::TextFmt; +use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, messages::{ - msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_PREPARING_EN_CONFIGS, + msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, + MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, + MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PORTS_CONFIG_ERR, MSG_PREPARING_EN_CONFIGS, + }, + utils::{ + consensus::{get_consensus_config, node_public_key}, + rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, - utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }; pub fn run(shell: &Shell, args: PrepareConfigArgs) -> anyhow::Result<()> { @@ -64,15 +74,45 @@ fn prepare_configs( gateway_url: None, }; let mut general_en = general.clone(); + let next_empty_ports_config = ports_config(&general) + .context(MSG_PORTS_CONFIG_ERR)? + .next_empty_ports_config(); + update_ports(&mut general_en, &next_empty_ports_config)?; + + // Set consensus config + let main_node_consensus_config = general + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; + + let mut gossip_static_outbound = BTreeMap::new(); + let main_node_public_key = node_public_key( + &config + .get_secrets_config()? + .consensus + .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, + )? + .context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - update_ports( - &mut general_en, - &ports_config(&general) - .context("da")? - .next_empty_ports_config(), + gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); + + let en_consensus_config = get_consensus_config( + config, + next_empty_ports_config, + None, + Some(gossip_static_outbound), )?; + general_en.consensus_config = Some(en_consensus_config.clone()); + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + + // Set secrets config + let node_key = roles::node::SecretKey::generate().encode(); + let consensus_secrets = ConsensusSecrets { + validator_key: None, + attester_key: None, + node_key: Some(NodeSecretKey(Secret::new(node_key))), + }; let secrets = SecretsConfig { - consensus: None, + consensus: Some(consensus_secrets), database: Some(DatabaseSecrets { server_url: Some(args.db.full_url().into()), prover_url: None, diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs index 9d3da4663859..46328699e6d4 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs @@ -32,6 +32,7 @@ async fn run_external_node( if args.reinit { init::init(shell, chain_config).await? 
} + let enable_consensus = args.enable_consensus.unwrap_or(false); let server = RunExternalNode::new(args.components.clone(), chain_config)?; - server.run(shell, args.additional_args.clone()) + server.run(shell, enable_consensus, args.additional_args.clone()) } diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs index 72c8948a65d1..22e570a5439e 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zk_toolbox/crates/zk_inception/src/consts.rs @@ -1,3 +1,5 @@ +use std::net::{IpAddr, Ipv4Addr}; + pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -9,6 +11,27 @@ pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; +#[allow(non_upper_case_globals)] +const kB: usize = 1024; + +/// Max payload size for consensus in bytes +pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; +/// Max batch size for consensus in bytes +/// Compute a default batch size, so operators are not caught out by the missing setting +/// while we're still working on batch syncing. The batch interval is ~1 minute, +/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high +/// traffic there can be thousands of huge transactions that quickly fill up blocks +/// and there could be more blocks in a batch than expected. We chose a generous +/// limit so as not to prevent any legitimate batch from being transmitted. +pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; +/// Gossip dynamic inbound limit for consensus +pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; + +/// Public address for consensus +pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); +/// Server address for consensus +pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); + /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs index 544e28377403..6c3821eed856 100644 --- a/zk_toolbox/crates/zk_inception/src/defaults.rs +++ b/zk_toolbox/crates/zk_inception/src/defaults.rs @@ -14,6 +14,7 @@ lazy_static!
{ pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper"; pub const ROCKS_DB_TREE: &str = "tree"; pub const ROCKS_DB_PROTECTIVE_READS: &str = "protective_reads"; +pub const ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER: &str = "basic_witness_input_producer"; pub const EN_ROCKS_DB_PREFIX: &str = "en"; pub const MAIN_ROCKS_DB_PREFIX: &str = "main"; diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zk_toolbox/crates/zk_inception/src/external_node.rs index ef62738a7d2a..5ff4ce070250 100644 --- a/zk_toolbox/crates/zk_inception/src/external_node.rs +++ b/zk_toolbox/crates/zk_inception/src/external_node.rs @@ -6,6 +6,7 @@ use config::{ SecretsConfig, }; use xshell::Shell; +use zksync_config::configs::consensus::ConsensusConfig; use crate::messages::MSG_FAILED_TO_RUN_SERVER_ERR; @@ -15,6 +16,7 @@ pub struct RunExternalNode { general_config: PathBuf, secrets: PathBuf, en_config: PathBuf, + consensus_config: PathBuf, } impl RunExternalNode { @@ -29,6 +31,7 @@ impl RunExternalNode { let general_config = GeneralConfig::get_path_with_base_path(&en_path); let secrets = SecretsConfig::get_path_with_base_path(&en_path); let enconfig = ENConfig::get_path_with_base_path(&en_path); + let consensus_config = ConsensusConfig::get_path_with_base_path(&en_path); Ok(Self { components, @@ -36,17 +39,29 @@ impl RunExternalNode { general_config, secrets, en_config: enconfig, + consensus_config, }) } - pub fn run(&self, shell: &Shell, mut additional_args: Vec<String>) -> anyhow::Result<()> { + pub fn run( + &self, + shell: &Shell, + enable_consensus: bool, + mut additional_args: Vec<String>, + ) -> anyhow::Result<()> { let code_path = self.code_path.to_str().unwrap(); let config_general_config = &self.general_config.to_str().unwrap(); let en_config = &self.en_config.to_str().unwrap(); let secrets = &self.secrets.to_str().unwrap(); + let consensus_config = &self.consensus_config.to_str().unwrap(); if let Some(components) = self.components() { additional_args.push(format!("--components={}", components)) } + let mut consensus_args = vec![]; + if enable_consensus { + consensus_args.push("--enable-consensus".to_string()); + consensus_args.push(format!("--consensus-path={}", consensus_config)) + } common::external_node::run( shell, @@ -54,6 +69,7 @@ impl RunExternalNode { config_general_config, secrets, en_config, + consensus_args, additional_args, ) .context(MSG_FAILED_TO_RUN_SERVER_ERR) diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs index 6fa1e293927b..c5b77f63ebae 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zk_toolbox/crates/zk_inception/src/messages.rs @@ -220,6 +220,7 @@ pub(super) const MSG_DEPLOYING_PAYMASTER: &str = "Deploying paymaster"; /// Run server related messages pub(super) const MSG_SERVER_COMPONENTS_HELP: &str = "Components of server to run"; +pub(super) const MSG_ENABLE_CONSENSUS_HELP: &str = "Enable consensus"; pub(super) const MSG_SERVER_GENESIS_HELP: &str = "Run server in genesis mode"; pub(super) const MSG_SERVER_ADDITIONAL_ARGS_HELP: &str = "Additional arguments that can be passed through the CLI"; @@ -306,6 +307,11 @@ pub(super) fn msg_preparing_en_config_is_done(path: &Path) -> String { pub(super) const MSG_EXTERNAL_NODE_CONFIG_NOT_INITIALIZED: &str = "External node is not initialized"; +pub(super) const MSG_CONSENSUS_CONFIG_MISSING_ERR: &str = "Consensus config is missing"; +pub(super) const MSG_CONSENSUS_SECRETS_MISSING_ERR: &str = "Consensus secrets config is missing"; +pub(super) const
MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR: &str = "Consensus node key is missing"; + +pub(super) const MSG_PORTS_CONFIG_ERR: &str = "Failed to get ports config"; pub(super) const MSG_STARTING_EN: &str = "Starting external node"; diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs new file mode 100644 index 000000000000..06848334a6e1 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -0,0 +1,124 @@ +use std::{ + collections::{BTreeMap, BTreeSet}, + net::SocketAddr, +}; + +use config::{ChainConfig, PortsConfig}; +use secrecy::{ExposeSecret, Secret}; +use zksync_config::configs::consensus::{ + AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, + NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, + WeightedAttester, WeightedValidator, +}; +use zksync_consensus_crypto::{Text, TextFmt}; +use zksync_consensus_roles as roles; + +use crate::consts::{ + CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, + MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, +}; + +#[derive(Debug, Clone)] +pub struct ConsensusSecretKeys { + validator_key: roles::validator::SecretKey, + attester_key: roles::attester::SecretKey, + node_key: roles::node::SecretKey, +} + +pub struct ConsensusPublicKeys { + validator_key: roles::validator::PublicKey, + attester_key: roles::attester::PublicKey, +} + +pub fn get_consensus_config( + chain_config: &ChainConfig, + ports: PortsConfig, + consensus_keys: Option<ConsensusSecretKeys>, + gossip_static_outbound: Option<BTreeMap<NodePublicKey, Host>>, +) -> anyhow::Result<ConsensusConfig> { + let genesis_spec = + consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); + + let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, ports.consensus_port); + let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, ports.consensus_port); + + Ok(ConsensusConfig { + server_addr, + public_addr: Host(public_addr.encode()), + genesis_spec, + max_payload_size: MAX_PAYLOAD_SIZE, + gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, + max_batch_size: MAX_BATCH_SIZE, + gossip_static_inbound: BTreeSet::new(), + gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), + rpc: None, + }) +} + +pub fn generate_consensus_keys() -> ConsensusSecretKeys { + ConsensusSecretKeys { + validator_key: roles::validator::SecretKey::generate(), + attester_key: roles::attester::SecretKey::generate(), + node_key: roles::node::SecretKey::generate(), + } +} + +fn get_consensus_public_keys(consensus_keys: &ConsensusSecretKeys) -> ConsensusPublicKeys { + ConsensusPublicKeys { + validator_key: consensus_keys.validator_key.public(), + attester_key: consensus_keys.attester_key.public(), + } +} + +pub fn get_genesis_specs( + chain_config: &ChainConfig, + consensus_keys: &ConsensusSecretKeys, +) -> GenesisSpec { + let public_keys = get_consensus_public_keys(consensus_keys); + let validator_key = public_keys.validator_key.encode(); + let attester_key = public_keys.attester_key.encode(); + + let validator = WeightedValidator { + key: ValidatorPublicKey(validator_key.clone()), + weight: 1, + }; + let attester = WeightedAttester { + key: AttesterPublicKey(attester_key), + weight: 1, + }; + let leader = ValidatorPublicKey(validator_key); + + GenesisSpec { + chain_id: chain_config.chain_id, + protocol_version: ProtocolVersion(1), + validators: vec![validator], + attesters: vec![attester], + leader, + registry_address: None, + } +} + +pub fn
get_consensus_secrets(consensus_keys: &ConsensusSecretKeys) -> ConsensusSecrets { + let validator_key = consensus_keys.validator_key.encode(); + let attester_key = consensus_keys.attester_key.encode(); + let node_key = consensus_keys.node_key.encode(); + + ConsensusSecrets { + validator_key: Some(ValidatorSecretKey(Secret::new(validator_key))), + attester_key: Some(AttesterSecretKey(Secret::new(attester_key))), + node_key: Some(NodeSecretKey(Secret::new(node_key))), + } +} + +pub fn node_public_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<NodePublicKey>> { + Ok(node_key(secrets)?.map(|node_secret_key| NodePublicKey(node_secret_key.public().encode()))) +} +fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<roles::node::SecretKey>> { + read_secret_text(secrets.node_key.as_ref().map(|x| &x.0)) +} + +fn read_secret_text<T: TextFmt>(text: Option<&Secret<String>>) -> anyhow::Result<Option<T>> { + text.map(|text| Text::new(text.expose_secret()).decode()) + .transpose() + .map_err(|_| anyhow::format_err!("invalid format")) +} diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zk_toolbox/crates/zk_inception/src/utils/mod.rs index a84f0a336de5..229d3908dc3a 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/mod.rs @@ -1,2 +1,3 @@ +pub mod consensus; pub mod forge; pub mod rocks_db; diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs index 17cffa66e39d..1b7e29dd9722 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs @@ -4,8 +4,8 @@ use config::RocksDbs; use xshell::Shell; use crate::defaults::{ - EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_PROTECTIVE_READS, ROCKS_DB_STATE_KEEPER, - ROCKS_DB_TREE, + EN_ROCKS_DB_PREFIX, MAIN_ROCKS_DB_PREFIX, ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER, + ROCKS_DB_PROTECTIVE_READS, ROCKS_DB_STATE_KEEPER, ROCKS_DB_TREE, }; pub enum RocksDBDirOption { @@ -37,9 +37,13 @@ pub fn recreate_rocksdb_dirs( .join(option.prefix()) .join(ROCKS_DB_PROTECTIVE_READS); shell.remove_path(&protective_reads)?; + let basic_witness_input_producer = rocks_db_path + .join(option.prefix()) + .join(ROCKS_DB_BASIC_WITNESS_INPUT_PRODUCER); Ok(RocksDbs { state_keeper: shell.create_dir(state_keeper)?, merkle_tree: shell.create_dir(merkle_tree)?, protective_reads: shell.create_dir(protective_reads)?, + basic_witness_input_producer: shell.create_dir(basic_witness_input_producer)?, }) } From b359b085895da6582f1d28722107bc5b25f1232c Mon Sep 17 00:00:00 2001 From: Marcin M <128217157+mm-zk@users.noreply.github.com> Date: Thu, 12 Sep 2024 14:22:43 +0200 Subject: [PATCH 062/116] feat: Selector generator tool (#2844) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ * A small tool to generate the selector hashes based on the ABI from json files ## Why ❔ * The output json can be useful for humans to better understand some of the errors (and calldata) * It can also be read by our tools, to make the debugging easier. In the future, we could call this tool regularly on each contracts version change, but for now it can stay as manual.
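For reference, the derivation this tool performs is the standard Solidity one: a selector is the first 4 bytes of the keccak-256 hash of a function's canonical signature. A minimal sketch of that computation (illustrative only, not part of the patch; it assumes just the `sha3` crate, which the tool already depends on):

```rust
use sha3::{Digest, Keccak256};

/// Hex-encodes the 4-byte selector for a canonical signature,
/// e.g. "transfer(address,uint256)".
fn selector(canonical_signature: &str) -> String {
    // keccak256 over the signature; the selector is the first 4 bytes.
    let digest = Keccak256::digest(canonical_signature.as_bytes());
    digest[..4].iter().map(|b| format!("{b:02x}")).collect()
}

fn main() {
    // Consistent with the "a9059cbb": "transfer" entry in the generated selectors.json.
    assert_eq!(selector("transfer(address,uint256)"), "a9059cbb");
}
```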
--- Cargo.lock | 11 + Cargo.toml | 2 + core/bin/selector_generator/Cargo.toml | 18 + core/bin/selector_generator/README.md | 13 + core/bin/selector_generator/src/main.rs | 105 +++++ etc/selector-generator-data/README.md | 3 + etc/selector-generator-data/selectors.json | 518 +++++++++++++++++++++ 7 files changed, 670 insertions(+) create mode 100644 core/bin/selector_generator/Cargo.toml create mode 100644 core/bin/selector_generator/README.md create mode 100644 core/bin/selector_generator/src/main.rs create mode 100644 etc/selector-generator-data/README.md create mode 100644 etc/selector-generator-data/selectors.json diff --git a/Cargo.lock b/Cargo.lock index 8f8d588c8fcf..0485417df8f6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6425,6 +6425,17 @@ dependencies = [ "libc", ] +[[package]] +name = "selector_generator" +version = "0.1.0" +dependencies = [ + "clap 4.4.6", + "glob", + "serde", + "serde_json", + "sha3 0.10.8", +] + [[package]] name = "semver" version = "1.0.23" diff --git a/Cargo.toml b/Cargo.toml index 84e8df61f096..145b72446b48 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "core/bin/external_node", "core/bin/merkle_tree_consistency_checker", "core/bin/snapshots_creator", + "core/bin/selector_generator", "core/bin/system-constants-generator", "core/bin/verified_sources_fetcher", "core/bin/zksync_server", @@ -120,6 +121,7 @@ envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" futures = "0.3" +glob = "0.3" google-cloud-auth = "0.16.0" google-cloud-storage = "0.20.0" governor = "0.4.2" diff --git a/core/bin/selector_generator/Cargo.toml b/core/bin/selector_generator/Cargo.toml new file mode 100644 index 000000000000..e0b0afe0ae2c --- /dev/null +++ b/core/bin/selector_generator/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "selector_generator" +version = "0.1.0" +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true +publish = false + +[dependencies] +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +sha3.workspace = true +glob.workspace = true +clap = { workspace = true, features = ["derive"] } \ No newline at end of file diff --git a/core/bin/selector_generator/README.md b/core/bin/selector_generator/README.md new file mode 100644 index 000000000000..a954613c7e45 --- /dev/null +++ b/core/bin/selector_generator/README.md @@ -0,0 +1,13 @@ +# Generates the list of Solidity selectors + +This tool generates a mapping from Solidity selectors to function names. + +The output JSON file can be used by multiple tools to improve debugging and readability. + +By default, it appends the newly found selectors into the list.
+ +To run, first make sure that you have your contracts compiled and then run: + +``` +cargo run ../../../contracts ../../../etc/selector-generator-data/selectors.json +``` diff --git a/core/bin/selector_generator/src/main.rs b/core/bin/selector_generator/src/main.rs new file mode 100644 index 000000000000..ad6180413f14 --- /dev/null +++ b/core/bin/selector_generator/src/main.rs @@ -0,0 +1,105 @@ +use std::{ + collections::HashMap, + fs::{File, OpenOptions}, + io::{self}, +}; + +use clap::Parser; +use glob::glob; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Keccak256}; + +#[derive(Debug, Serialize, Deserialize)] +struct ABIEntry { + #[serde(rename = "type")] + entry_type: String, + name: Option<String>, + inputs: Option<Vec<ABIInput>>, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ABIInput { + #[serde(rename = "type")] + input_type: String, +} + +#[derive(Debug, Parser)] +#[command(author, version, about, long_about = None)] +struct Cli { + contracts_dir: String, + output_file: String, +} + +/// Computes the Solidity selector for a given method and arguments. +fn compute_selector(name: &str, inputs: &[ABIInput]) -> String { + let signature = format!( + "{}({})", + name, + inputs + .iter() + .map(|i| i.input_type.clone()) + .collect::<Vec<_>>() + .join(",") + ); + let mut hasher = Keccak256::new(); + hasher.update(signature); + format!("{:x}", hasher.finalize())[..8].to_string() +} + +/// Analyses all the JSON files, looking for 'abi' entries, and then computes the selectors for them. +fn process_files(directory: &str, output_file: &str) -> io::Result<()> { + let mut selectors: HashMap<String, String> = match File::open(output_file) { + Ok(file) => serde_json::from_reader(file).unwrap_or_default(), + Err(_) => HashMap::new(), + }; + let selectors_before = selectors.len(); + let mut analyzed_files = 0; + + for entry in glob(&format!("{}/**/*.json", directory)).expect("Failed to read glob pattern") { + match entry { + Ok(path) => { + let file_path = path.clone(); + let file = File::open(path)?; + let json: Result<serde_json::Value, _> = serde_json::from_reader(file); + + if let Ok(json) = json { + if let Some(abi) = json.get("abi").and_then(|v| v.as_array()) { + analyzed_files += 1; + for item in abi { + let entry: ABIEntry = serde_json::from_value(item.clone()).unwrap(); + if entry.entry_type == "function" { + if let (Some(name), Some(inputs)) = (entry.name, entry.inputs) { + let selector = compute_selector(&name, &inputs); + selectors.entry(selector).or_insert(name); + } + } + } + } + } else { + eprintln!("Error parsing file: {:?} - ignoring.", file_path) + } + } + Err(e) => eprintln!("Error reading file: {:?}", e), + } + } + println!( + "Analyzed {} files. Added {} selectors (before: {} after: {})", + analyzed_files, + selectors.len() - selectors_before, + selectors_before, + selectors.len() + ); + + let file = OpenOptions::new() + .write(true) + .create(true) + .truncate(true) + .open(output_file)?; + serde_json::to_writer_pretty(file, &selectors)?; + Ok(()) +} + +fn main() -> io::Result<()> { + let args = Cli::parse(); + process_files(&args.contracts_dir, &args.output_file) +} diff --git a/etc/selector-generator-data/README.md b/etc/selector-generator-data/README.md new file mode 100644 index 000000000000..ddba2769e4f2 --- /dev/null +++ b/etc/selector-generator-data/README.md @@ -0,0 +1,3 @@ +# List of selectors from our contracts + +To regenerate the list, please use the selector_generator tool from core/bin directory.
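As a usage illustration (not part of this patch): a debugging helper can map the first 4 bytes of calldata back to a function name through the generated file. A hedged sketch, assuming `serde_json` and the repo-relative path introduced above:

```rust
use std::{collections::HashMap, error::Error, fs::File};

fn main() -> Result<(), Box<dyn Error>> {
    // The generated mapping is selector (hex, no 0x prefix) -> function name.
    let file = File::open("etc/selector-generator-data/selectors.json")?;
    let selectors: HashMap<String, String> = serde_json::from_reader(file)?;

    // The first 4 bytes of calldata identify the function being invoked.
    let selector = "a9059cbb"; // hypothetical: taken from a failed transaction's calldata
    match selectors.get(selector) {
        Some(name) => println!("{selector} => {name}"),
        None => println!("{selector} => <unknown selector>"),
    }
    Ok(())
}
```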
diff --git a/etc/selector-generator-data/selectors.json b/etc/selector-generator-data/selectors.json new file mode 100644 index 000000000000..6ea986e4263a --- /dev/null +++ b/etc/selector-generator-data/selectors.json @@ -0,0 +1,518 @@ +{ + "d0f2c663": "getBatchNumberAndTimestamp", + "2a79c611": "getCommitment", + "8129fc1c": "initialize", + "e2a9d554": "setUpgradeTimestamp", + "84c2ff75": "stmAssetId", + "7ac3a553": "withdrawLegacyBridge", + "e91659ae": "addNewChainIfNeeded", + "06d49e5b": "getPubdataPricingMode", + "1ff5a783": "execute", + "8310f2c6": "transferFundsFromSharedBridge", + "80b41246": "getBlockHashEVM", + "7da01cd6": "executeUpgrade", + "74044673": "addStateTransitionManager", + "82b57749": "forwardedBridgeMint", + "6478d8ed": "chainAdmin", + "4af63f02": "deploy", + "d0707b67": "aggregate", + "e0ab6368": "assetIdIsRegistered", + "27e86d6e": "getLastBlockHash", + "13bc9f20": "isOperationReady", + "4a2e35ba": "withdraw", + "1e4fba05": "getChainRoot", + "762008c2": "executeBatchesSharedBridge", + "155fd27a": "setValueUnderNonce", + "a6ae0aac": "coinbase", + "86d516e8": "getCurrentBlockGasLimit", + "3659cfe6": "upgradeTo", + "fa8f7ea6": "getAllHyperchains", + "7b510fe8": "getAccountInfo", + "40c10f19": "mint", + "e02e1bfd": "chainCount", + "015f58d7": "genesisUpgrade", + "28e439f3": "tryBlockAndAggregate", + "e76db865": "setPubdataPricingMode", + "62f84b24": "sendToL1", + "1c9f0149": "updateChainBalancesFromSharedBridge", + "38720778": "sharedBridge", + "64e130cf": "nativeTokenVault", + "adfca15e": "facetFunctionSelectors", + "af500fb7": "readBytes32", + "7b315630": "upgradeChainFromVersion", + "b6ea1757": "pushNewLeaf", + "e66c8c44": "validatorTimelock", + "4f1ef286": "upgradeToAndCall", + "fe26699e": "getTotalBlocksCommitted", + "805b9869": "executeTransactionFromOutside", + "aa4593dc": "revertReceive", + "64b554ad": "forwardedBridgeBurn", + "ba238947": "getProtocolVersion", + "07f8c636": "multicall", + "39607382": "getTotalBlocksExecuted", + "796b89b9": "getBlockTimestamp", + "9cd939e4": "l2LogsRootHash", + "b298e36b": "push", + "7890e5da": "side", + "5e1ac65f": "hashOperation", + "1806aa18": "getCodeSize", + "d4a4ca0d": "getBlockNumberAndTimestamp", + "06bed036": "setL2Block", + "aa970773": "validateAndPayForPaymasterTransaction", + "6223258e": "setDAValidatorPair", + "728cb93b": "bridgeClaimFailedBurn", + "d6abe642": "getAssetId", + "d2ef1b0e": "storedBatchZero", + "51b3c157": "hyperbridgingEnabled", + "53e61bdc": "processL2RollupDAValidatorOutputHash", + "95d89b41": "symbol", + "a37dc1d4": "forwardedBridgeClaimFailedBurn", + "db1f0bf9": "getTotalBatchesCommitted", + "beda594a": "setHyperchain", + "3977d71c": "getAggregatedRoot", + "c4d252f5": "cancel", + "2878fe74": "genesisUpgrade", + "2ab0f529": "isOperationDone", + "5d4edca7": "BRIDGE_HUB", + "d4b9f4fa": "messageRoot", + "fb1a9a57": "getDeploymentNonce", + "bb0fd610": "extendedAccountVersion", + "3cda3351": "create2", + "3a9d7f8d": "stmDeployer", + "db541184": "setShouldRevertOnExecuteBatches", + "74f4f547": "bridgeBurn", + "b852ad36": "l1SharedBridge", + "6ef25c3a": "baseFee", + "eb39e6d5": "stateTransitionManager", + "381c3f13": "checkDA", + "f92ad219": "initialize", + "9fa8826b": "depositHappened", + "01d23d4b": "diamondCut", + "55d35d18": "getValueUnderNonce", + "ee7fb38b": "calculateRoot", + "64d62353": "updateDelay", + "fd3c6b55": "processCalldataDA", + "39b34c6e": "requestBytecodeL1Publication", + "71623274": "l2TransactionBaseCost", + "53b9e632": "assetHandlerAddress", + "c987336c": "upgrade", + "5c975abb": 
"paused", + "4623c91d": "setValidator", + "4f1e1be0": "storeAccountConstructingCodeHash", + "b0f40a17": "processBatch", + "2c4f2a58": "bridgehubDepositBaseToken", + "ced531eb": "setHashes", + "18160ddd": "totalSupply", + "7cb9357e": "gasPerPubdataByte", + "7877a797": "blockGasLimit", + "cdc4878b": "nodeCount", + "c2eeeebd": "l1Address", + "0f23da43": "revertBatchesSharedBridge", + "e1239cd8": "incrementMinNonceIfEquals", + "8456cb59": "pause", + "9a42c2c2": "zeroPointerTest", + "f9f3ee2d": "setResult", + "7ba8be34": "decodeUint8", + "a635f01d": "delegateCall", + "2f90b184": "L1_CHAIN_ID", + "6c0960f9": "finalizeEthWithdrawal", + "31d50750": "isOperation", + "59ec65a2": "baseToken", + "a9b0d128": "setPriorityTreeStartIndex", + "c4879440": "bridgehubDepositBaseToken", + "823f1d96": "l2TokenProxyBytecodeHash", + "18876a04": "chunkPubdataToBlobs", + "699b0fb9": "bridgeBurn", + "17338945": "unfreezeDiamond", + "8a75bb09": "saveL2LogsRootHash", + "91b19874": "validators", + "63dc94b1": "forceDeploy", + "5a590335": "getDAValidatorPair", + "60144197": "setTokenMultiplierSetter", + "938b5f32": "origin", + "36ba0355": "bridgeMint", + "6dde7209": "l2TokenBeacon", + "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", + "7e44bc5e": "setImmutables", + "8e8acf87": "getL2BlockNumberAndTimestamp", + "e30c3978": "pendingOwner", + "f5e69a47": "publishCompressedBytecode", + "84da1fb4": "getNewAddressCreate2", + "47fcedb8": "setFeeParams", + "b22dd78e": "storedBatchHash", + "57e6246b": "initialCutHash", + "2b805192": "setNewVersionUpgrade", + "dbfe3e96": "updateSecurityCouncil", + "e03fe177": "getCodeHash", + "02fa5779": "setNewBatch", + "a225efcb": "setPubdataInfo", + "9cc395d0": "bridgeCheckCounterpartAddress", + "868085b1": "getBatchProofPublicInput", + "6a0cd1f5": "removeValidator", + "2ae9c600": "protocolVersion", + "61f91b2e": "initialForceDeploymentHash", + "72425d9d": "getCurrentBlockDifficulty", + "8c2a993e": "bridgeMint", + "b473318e": "l2TransactionBaseCost", + "f851a440": "admin", + "681fe70c": "isEmpty", + "ef3f0bae": "getTotalBatchesVerified", + "ba75bbd8": "front", + "cdffacc6": "facetAddress", + "89f9a072": "validatePubdata", + "66869d49": "changeFeeParams", + "e8b99b1b": "deposit", + "4d4a1eca": "setTokenMultiplier", + "a0803ef7": "currentBlockInfo", + "fb4baf17": "changeFeeParams", + "3591c1a0": "getBridgehub", + "fd791f3c": "getL2DefaultAccountBytecodeHash", + "ec8067c7": "updateNonceOrdering", + "a3912ec8": "receiveEther", + "79823c9a": "getFirstUnprocessedPriorityTx", + "235d9eb5": "setTokenMultiplier", + "dd354a06": "calculateCreate2TokenAddress", + "7efda2ae": "proveL2LeafInclusion", + "f120e6c4": "encodeTxDataHash", + "f5f15168": "l2TokenAddress", + "4d2301cc": "getEthBalance", + "ab07b2e9": "getL2GasPrice", + "363bf964": "setAddresses", + "607457f2": "setShouldRevertOnCommitBatches", + "d1ba7e97": "hyperchainAddress", + "841a9d42": "aggregate3Value", + "ea6c029c": "baseTokenGasPriceMultiplierNominator", + "de8fa431": "getSize", + "24a55db9": "markBytecodeAsPublished", + "c438a9f2": "L2_LEGACY_SHARED_BRIDGE", + "ddeaa8e6": "getBatchHash", + "8f31f052": "isWithdrawalFinalized", + "41cf49bb": "prepareChainCommitment", + "5d382700": "create2Account", + "6d9860e1": "l1AssetRouter", + "e1ad1162": "transfer", + "bf1fe420": "setGasPrice", + "a1954fc5": "getTotalPriorityTxs", + "c0a16dda": "setAssetDeploymentTracker", + "4145ca27": "removePriorityQueueFront", + "09e14277": "setStateTransitionManager", + "1f067457": "revertTransfer", + "b8c2f66f": "getTotalBatchesExecuted", + "07ee9355": "l2BridgeAddress", 
+ "095ea7b3": "approve", + "84b0196e": "eip712Domain", + "18b1771f": "getAssetId", + "f85894c5": "forwardedBridgeBurn", + "bd7c5412": "isEthWithdrawalFinalized", + "70a08231": "balanceOf", + "3425eb89": "tokenMultiplierSetter", + "5aa9b6b5": "getRawNonce", + "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", + "205c2878": "withdrawTo", + "ec3d5f88": "setPriorityTxMaxGasLimit", + "8eb7db57": "bridgehubConfirmL2Transaction", + "2a72b707": "bridgehubRequestL2Transaction", + "0f3fa211": "setNativeTokenVault", + "4bed8212": "isWithdrawalFinalized", + "0c56efe9": "initializeV2", + "501e60d5": "setUpgradeDiamondCut", + "c29f093f": "setSTM", + "f2fde38b": "transferOwnership", + "8c5a3445": "general", + "ca8f93f1": "setLegacyBaseTokenAssetId", + "71abd109": "upgrade", + "eced0bf0": "__DEPRECATED_tokenIsRegistered", + "dc8e4b26": "registerSettlementLayer", + "310ab089": "getImmutable", + "19cae462": "difficulty", + "77421056": "setFunctionToCall", + "3997d064": "tryAggregate", + "f1d357e5": "L1_SHARED_BRIDGE", + "952a3ee7": "getERC20Getters", + "29b98c67": "isDiamondStorageFrozen", + "17d7de7c": "getName", + "e81e0ba1": "isFunctionFreezable", + "7ebba672": "setTokenMultiplier", + "6ee1dc20": "validateNonceUsage", + "6a27e8b5": "getSettlementLayer", + "7a28adb2": "proveL2LogInclusion", + "671a7131": "settlementLayer", + "accdd16c": "freezeChain", + "c3bbd2d7": "isFacetFreezable", + "99a88ec4": "upgrade", + "95f11a40": "bridgeInitialize", + "c9f5c932": "requestL2TransactionTwoBridges", + "f1a78aa6": "postTransaction", + "ca65fe79": "finalizeDeposit", + "5518c73b": "getStateTransitionManager", + "b5b18fe5": "processL2Logs", + "969b53da": "l1Bridge", + "e8a71ca9": "forwardedBridgeMint", + "505e6d47": "updateAllLeaves", + "ecf95b8a": "createAccount", + "84d9fedd": "popFront", + "3f4ba83a": "unpause", + "1f98fa08": "createNewChain", + "313ce567": "decimals", + "3ce695e7": "registerSTMAssetOnL1", + "73c58a2d": "publishBlobs", + "f0e9da23": "readAddress", + "e23d2563": "getEraChainId", + "0ec6b0b7": "getPriorityTxMaxGasLimit", + "fdbb0301": "__DEPRECATED_l2BridgeAddress", + "52d1902d": "proxiableUUID", + "97bb3ce9": "tokenAddress", + "5d83b6da": "__DEPRECATED_baseToken", + "966c523e": "blockAndAggregate", + "f4943a20": "protocolVersionDeadline", + "46746c7d": "commitBatchesSharedBridge", + "87d9d023": "verify", + "57f3921f": "stmAssetIdToAddress", + "e516761e": "markFactoryDeps", + "daa51a8c": "pushBack", + "2e1a7d4d": "withdraw", + "af6ed122": "executeUpgrade", + "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", + "01eae183": "depositAmount", + "9e8945d2": "verificationKeyHash", + "a3bd0112": "genesisUpgrade", + "927c4bf7": "upgradeExternal", + "56079ac8": "sendL2ToL1Log", + "d92f86a2": "setLegacyChainAddress", + "be6f11cf": "setPriorityTxMaxGasLimit", + "7321c485": "dummySetValidator", + "c0991525": "claimFailedDeposit", + "72d74cd7": "reinitializeToken", + "ab93d6f3": "requestL2TransactionToGatewayMailbox", + "3601e63e": "bridgeRecoverFailedTransfer", + "eb672419": "requestL2Transaction", + "af6a2dcd": "getTotalBlocksVerified", + "27eb6c0f": "securityCouncil", + "4c6314f0": "getMarker", + "49a7cc72": "payForTransaction", + "f20265d2": "setRevertTransfer", + "84bc3eb0": "withdrawWithMessage", + "79c4f929": "markBytecodeAsPublished", + "580d6bff": "updateAllNodesAtHeight", + "e5355c75": "getL2SystemContractsUpgradeBatchNumber", + "ca408c23": "bridgehubDeposit", + "6ab8f82e": "proveL2LogInclusion", + "7528c2c6": "applyL1ToL2Alias", + "59890bcb": "setExecutedBatches", + "b19f0ade": 
"executeUpgradeNoOverlap", + "15f9a2fe": "prepareForPaymaster", + "6e9d7899": "legacyBridge", + "ef0e2ff4": "setChainId", + "e52db4ca": "baseTokenAssetId", + "0f28c97d": "getCurrentBlockTimestamp", + "d0e30db0": "deposit", + "9623609d": "upgradeAndCall", + "5ca1e165": "getRoot", + "fe173b97": "gasPrice", + "a851ae78": "setTxOrigin", + "18717dc1": "setPorterAvailability", + "cbcf2e3c": "isTransactionAllowed", + "c4d66de8": "initialize", + "7c9bd1f3": "publishTimestampDataToL1", + "69c76df2": "readUint32", + "a75b496d": "getAllHyperchainChainIDs", + "f5ba4232": "removeStateTransitionManager", + "42cbb15c": "getBlockNumber", + "607e2cb2": "setRevertReceive", + "328ef4fe": "setBaseTokenGasMultiplierPrice", + "1c50cfea": "addTokenAssetId", + "6d1d8363": "scheduleShadow", + "9cc7f708": "balanceOf", + "933999fb": "deposit", + "c2e047ff": "aggregate3", + "bb7044b6": "stateTransitionManagerIsRegistered", + "d4ce08c2": "addNewChain", + "f34d1868": "setExecutionDelay", + "9caf9bac": "setX", + "f113c88b": "createNewChain", + "1cc5d103": "setPorterAvailability", + "cdf25430": "L1_ASSET_ROUTER", + "def9d6af": "protocolVersionIsActive", + "c21a38e2": "proveL2MessageInclusion", + "e543e5bf": "setChainCreationParams", + "4be99e1d": "getCurrentPubdataCost", + "74f4d30d": "storedBlockHash", + "f8f7cd76": "validateTransaction", + "7a0ed627": "facets", + "38a78092": "increaseMinNonce", + "8cb7f3d0": "forceDeployOnAddresses", + "a2d5a0cc": "proveBatchesSharedBridge", + "301e7765": "getChainAdmin", + "fb644fc5": "addChainBatchRoot", + "6006d8b5": "verifyCompressedStateDiffs", + "39509351": "increaseAllowance", + "51cff8d9": "withdraw", + "8ffe1b81": "setBridgeHubAddress", + "95ce3e93": "decodeString", + "09824a80": "registerToken", + "d86970d8": "getL2BootloaderBytecodeHash", + "a31ee5b0": "initialize", + "0d4651aa": "storeAccountConstructedCodeHash", + "9a188371": "requestL2TransactionDirect", + "ed1d7d97": "chainIndexToId", + "c63c4e9b": "minDelay", + "546b6d2a": "SHARED_BRIDGE", + "187598a5": "getNewAddressCreate", + "bf529569": "setFreezability", + "cfe7af7c": "finalizeDeposit", + "bcf284e5": "executeTransaction", + "3437949a": "l1GenesisUpgrade", + "f54266a2": "l1TokenAddress", + "c9d1c097": "stmAssetIdFromChainId", + "39d7d4aa": "getPriorityTreeRoot", + "41c841c3": "L1_WETH_TOKEN", + "19fa7f62": "claimFailedDeposit", + "5c60da1b": "implementation", + "dd62ed3e": "allowance", + "9cd45184": "chainBalance", + "7958004c": "getOperationState", + "8cf2b2f0": "uncheckedInc", + "715018a6": "renounceOwnership", + "30bda03e": "setL1Erc20Bridge", + "c0d5b949": "getCurrentPubdataSpent", + "4de2e468": "getRawCodeHash", + "7ecebe00": "nonces", + "0e18b681": "acceptAdmin", + "d0468156": "getPendingAdmin", + "d83e4e03": "genesisUpgrade", + "49eb3b50": "getTransactionHashes", + "ebf0c717": "root", + "8da5cb5b": "owner", + "11a2ccc1": "finalizeWithdrawal", + "1dd93b33": "keccakValidationTest", + "f088ccdc": "callCodeOracle", + "aad74262": "setProtocolVersionDeadline", + "72c84445": "callKeccak", + "21f603d7": "setTransactionFilterer", + "52ef6b2c": "facetAddresses", + "9e6ea417": "depositLegacyErc20Bridge", + "960dcf24": "getBaseTokenAssetId", + "a888cc3a": "bridgehubRequestL2TransactionOnGateway", + "c7ca373c": "initFromCommitment", + "548a5a33": "setAssetHandlerAddressThisChain", + "402efc91": "stateTransitionManager", + "7b30c8da": "getL2SystemContractsUpgradeTxHash", + "0ef26743": "height", + "79ba5097": "acceptOwnership", + "584b153e": "isOperationPending", + "06fdde03": "name", + "e717bab7": 
"proveL1ToL2TransactionStatusViaGateway", + "a8b0574e": "getCurrentBlockCoinbase", + "30e5ccbd": "incrementTxNumberInBatch", + "ef011dff": "ERA_CHAIN_ID", + "f8c1f0d2": "upgradeChainFromVersion", + "f3b7dead": "getProxyAdmin", + "f26f3c8f": "proveL2MessageInclusion", + "3558c188": "executeBatches", + "bcd1b23d": "updateFullTree", + "3a3f36f9": "codeOracleTest", + "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "81d100a3": "scheduleTransparent", + "85e4e16a": "assetDeploymentTracker", + "204e1c7a": "getProxyImplementation", + "d566afd3": "createBatchCommitment", + "70f5c679": "setMessageRoot", + "07168226": "deployBeaconProxy", + "7b574586": "publishedBlobCommitments", + "fcc73360": "updateLeaf", + "631f4bac": "getPriorityQueueSize", + "3e64a696": "getBasefee", + "facd743b": "isValidator", + "7fb67816": "setValidatorTimelock", + "ee82ac5e": "getBlockHash", + "6e9960c3": "getAdmin", + "98acd7a6": "getBaseToken", + "06e7517b": "appendTransactionToCurrentL2Block", + "b993549e": "getCommittedBatchTimestamp", + "23dc4a09": "keccakPerformUpgrade", + "cf347e17": "setValidator", + "3408e470": "getChainId", + "ae1f6aaf": "l2Bridge", + "c2e90293": "bridgeRecoverFailedTransfer", + "86b7f856": "publishPubdataAndClearState", + "b292f5f1": "proveL1ToL2TransactionStatus", + "7a592065": "calculateRoot", + "a5277a02": "initialize", + "ef939455": "keccakUpgradeTest", + "3644e515": "DOMAIN_SEPARATOR", + "306395c6": "incrementDeploymentNonce", + "b277f199": "uncheckedAdd", + "6fadcf72": "forward", + "ae65def1": "node", + "e0bf0850": "setShouldRevertOnProveBatches", + "a457c2d7": "decreaseAllowance", + "9f3f89dc": "getZero", + "4dd18bf5": "setPendingAdmin", + "33ce93fe": "getProtocolVersion", + "c87325f1": "finalizeWithdrawal", + "40a434d5": "transferTokenToNTV", + "e9420f8c": "whitelistedSettlementLayers", + "3f704d2a": "setAssetHandlerAddress", + "ede25608": "protocolVersionToUpgradeTimestamp", + "042901c7": "proveL1ToL2TransactionStatus", + "cab7e8eb": "isNonceUsed", + "5aa6fa1f": "NATIVE_TOKEN_VAULT", + "b8776d4d": "chainRegistered", + "8fbb3711": "claimFailedDepositLegacyErc20Bridge", + "8dd14802": "setBridge", + "b3160bad": "executeBatchesSharedBridge", + "f5c1182c": "getSemverProtocolVersion", + "8b257989": "executionDelay", + "588570a5": "initialize", + "4cd40a02": "setLegacyTokenAssetId", + "d124dc4f": "send", + "23b872dd": "transferFrom", + "086a56f8": "getBaseTokenBridge", + "689992b3": "undoL1ToL2Alias", + "03c5d8af": "forwardTransactionOnGateway", + "48ceb85e": "chainIndex", + "ba334825": "hyperchain", + "b1fde1a8": "sharedTree", + "7069d0c0": "executeInstant", + "c2aaf9c4": "receiveEth", + "2986c0e5": "index", + "b5872958": "timestamps", + "c2e4ff97": "markAccountCodeHashAsConstructed", + "9c4d535b": "create", + "923b3b56": "forceDeployOnAddress", + "3635f3e6": "resetTxNumberInBatch", + "19698bc9": "infiniteFunction", + "315fff4e": "THIS_ADDRESS", + "52c9eacb": "upgradeCutHash", + "18e3a941": "getVerifierParams", + "29f172ad": "unsafeOverrideBatch", + "4b561753": "addValidator", + "a9059cbb": "transfer", + "949431dc": "approvalBased", + "8f283970": "changeAdmin", + "85df51fd": "blockHash", + "dead6f7f": "getHyperchain", + "896909dc": "getMinNonce", + "7eff275e": "changeProxyAdmin", + "27ae4c16": "freezeDiamond", + "566338a9": "getL1TokenAddress", + "8ac84c0e": "txNumberInBlock", + "53ce2061": "revertBatches", + "9a8a0592": "chainId", + "f5407abe": "setValues", + "46657fe9": "getVerifier", + "484f0505": "getHyperchainLegacy", + "b760faf9": "depositTo", + "5de097b1": 
"nullifyChainBalanceByNTV", + "e8295588": "zeros", + "f90eb963": "getPorterAvailability", + "57180981": "updateAccountVersion", + "579952fc": "transferFromTo", + "d505accf": "permit", + "e02da327": "readUint256", + "51d218f7": "unfreezeChain", + "8466d8d1": "getBridgeHubAddress", + "b381724e": "setFeeParams", + "d9caed12": "withdraw", + "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber" +} \ No newline at end of file From 19887ef21a8bbd26977353f8ee277b711850dfd2 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 12 Sep 2024 16:25:21 +0400 Subject: [PATCH 063/116] feat(prover): Optimize setup keys loading (#2847) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Loads setup keys to memory in parallel (for GPU prover only). - Refactors a bunch of related code for simplicity. ## Why ❔ - Locally I've observed load time going from ~30s to ~12s, so ~60% improvement for prover start time. - Readability & maintainability. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/lib/basic_types/src/basic_fri_types.rs | 59 ++++---- core/lib/config/src/configs/fri_prover.rs | 2 +- .../config/src/configs/fri_prover_group.rs | 137 +++++------------- prover/Cargo.lock | 1 + prover/Cargo.toml | 2 +- prover/crates/bin/prover_fri/Cargo.toml | 3 + .../src/gpu_prover_job_processor.rs | 90 +++++++++--- prover/crates/bin/prover_fri/src/main.rs | 10 +- 8 files changed, 151 insertions(+), 153 deletions(-) diff --git a/core/lib/basic_types/src/basic_fri_types.rs b/core/lib/basic_types/src/basic_fri_types.rs index 5969cca6b8c0..9de9920e86f6 100644 --- a/core/lib/basic_types/src/basic_fri_types.rs +++ b/core/lib/basic_types/src/basic_fri_types.rs @@ -152,6 +152,29 @@ impl AggregationRound { AggregationRound::Scheduler => None, } } + + /// Returns all the circuit IDs that correspond to a particular + /// aggregation round. + /// + /// For example, in aggregation round 0, the circuit ids should be 1 to 15 + 255 (EIP4844). + /// In aggregation round 1, the circuit ids should be 3 to 18. + /// In aggregation round 2, the circuit ids should be 2. + /// In aggregation round 3, the circuit ids should be 255. + /// In aggregation round 4, the circuit ids should be 1. 
+ pub fn circuit_ids(self) -> Vec<CircuitIdRoundTuple> { + match self { + AggregationRound::BasicCircuits => (1..=15) + .chain(once(255)) + .map(|circuit_id| CircuitIdRoundTuple::new(circuit_id, self as u8)) + .collect(), + AggregationRound::LeafAggregation => (3..=18) + .map(|circuit_id| CircuitIdRoundTuple::new(circuit_id, self as u8)) + .collect(), + AggregationRound::NodeAggregation => vec![CircuitIdRoundTuple::new(2, self as u8)], + AggregationRound::RecursionTip => vec![CircuitIdRoundTuple::new(255, self as u8)], + AggregationRound::Scheduler => vec![CircuitIdRoundTuple::new(1, self as u8)], + } + } } impl std::fmt::Display for AggregationRound { @@ -265,33 +288,17 @@ impl CircuitProverStats { impl Default for CircuitProverStats { fn default() -> Self { - let mut stats = HashMap::new(); - for circuit in (1..=15).chain(once(255)) { - stats.insert( - CircuitIdRoundTuple::new(circuit, 0), - JobCountStatistics::default(), - ); - } - for circuit in 3..=18 { - stats.insert( - CircuitIdRoundTuple::new(circuit, 1), - JobCountStatistics::default(), - ); - } - stats.insert( - CircuitIdRoundTuple::new(2, 2), - JobCountStatistics::default(), - ); - stats.insert( - CircuitIdRoundTuple::new(255, 3), - JobCountStatistics::default(), - ); - stats.insert( - CircuitIdRoundTuple::new(1, 4), - JobCountStatistics::default(), - ); + let circuits_prover_stats = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|round| { + let circuit_ids = round.circuit_ids(); + circuit_ids.into_iter().map(|circuit_id_round_tuple| { + (circuit_id_round_tuple, JobCountStatistics::default()) + }) + }) + .collect(); Self { - circuits_prover_stats: stats, + circuits_prover_stats, } } } diff --git a/core/lib/config/src/configs/fri_prover.rs b/core/lib/config/src/configs/fri_prover.rs index f6a21beaa6dc..32558dd2244b 100644 --- a/core/lib/config/src/configs/fri_prover.rs +++ b/core/lib/config/src/configs/fri_prover.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use crate::ObjectStoreConfig; -#[derive(Debug, Deserialize, Clone, PartialEq)] +#[derive(Debug, Deserialize, Clone, Copy, PartialEq)] pub enum SetupLoadMode { FromDisk, FromMemory, diff --git a/core/lib/config/src/configs/fri_prover_group.rs b/core/lib/config/src/configs/fri_prover_group.rs index 0fd752b5c286..294d4d1bbd44 100644 --- a/core/lib/config/src/configs/fri_prover_group.rs +++ b/core/lib/config/src/configs/fri_prover_group.rs @@ -1,7 +1,7 @@ use std::collections::HashSet; use serde::Deserialize; -use zksync_basic_types::basic_fri_types::CircuitIdRoundTuple; +use zksync_basic_types::basic_fri_types::{AggregationRound, CircuitIdRoundTuple}; /// Configuration for the grouping of specialized provers. #[derive(Debug, Deserialize, Clone, PartialEq)] @@ -81,6 +81,7 @@ impl FriProverGroupConfig { .flatten() .collect() } + /// check all_circuit ids present exactly once /// and For each aggregation round, check that the circuit ids are in the correct range. /// For example, in aggregation round 0, the circuit ids should be 1 to 15 + 255 (EIP4844). /// In aggregation round 1, the circuit ids should be 3 to 18. /// In aggregation round 2, the circuit ids should be 2. /// In aggregation round 3, the circuit ids should be 255. /// In aggregation round 4, the circuit ids should be 1.
pub fn validate(&self) -> anyhow::Result<()> { - let mut rounds: Vec<Vec<CircuitIdRoundTuple>> = vec![Vec::new(); 5]; let groups = [ &self.group_0, &self.group_1, @@ -107,110 +107,45 @@ impl FriProverGroupConfig { &self.group_13, &self.group_14, ]; - for group in groups { - for circuit_round in group { - let round = match rounds.get_mut(circuit_round.aggregation_round as usize) { - Some(round) => round, - None => anyhow::bail!( - "Invalid aggregation round {}.", - circuit_round.aggregation_round - ), - }; - round.push(circuit_round.clone()); - } - } - - for (round, round_data) in rounds.iter().enumerate() { - let circuit_ids: Vec<u8> = round_data.iter().map(|x| x.circuit_id).collect(); - let unique_circuit_ids: HashSet<u8> = circuit_ids.iter().copied().collect(); - let duplicates: HashSet<u8> = circuit_ids - .iter() - .filter(|id| circuit_ids.iter().filter(|x| x == id).count() > 1) - .copied() - .collect(); + let mut expected_circuit_ids: HashSet<_> = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|r| r.circuit_ids()) + .collect(); - let (missing_ids, not_in_range, expected_circuits_description) = match round { - 0 => { - let mut expected_range: Vec<_> = (1..=15).collect(); - expected_range.push(255); - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit IDs 1 to 15 and 255") - } - 1 => { - let expected_range: Vec<_> = (3..=18).collect(); - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit IDs 3 to 18") - } - 2 => { - let expected_range: Vec<_> = vec![2]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 2") + let mut provided_circuit_ids = HashSet::new(); + for (group_id, group) in groups.iter().enumerate() { + for circuit_id_round in group.iter() { + // Make sure that it's a known circuit. + if !expected_circuit_ids.contains(circuit_id_round) { + anyhow::bail!( + "Group {} contains unexpected circuit id: {:?}", + group_id, + circuit_id_round + ); } - 3 => { - let expected_range: Vec<_> = vec![255]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 255") - } - 4 => { - let expected_range: Vec<_> = vec![1]; - let missing_ids: Vec<_> = expected_range - .iter() - .copied() - .filter(|id| !circuit_ids.contains(id)) - .collect(); - let not_in_range: Vec<_> = circuit_ids - .iter() - .filter(|&id| !expected_range.contains(id)) - .collect(); - (missing_ids, not_in_range, "circuit ID 1") - } - _ => { - anyhow::bail!("Unknown round {}", round); + // Remove this circuit from the expected set: later we will check that all circuits + // are present. + expected_circuit_ids.remove(circuit_id_round); + + // Make sure that the circuit is not duplicated.
+ if provided_circuit_ids.contains(circuit_id_round) { + anyhow::bail!( + "Group {} contains duplicate circuit id: {:?}", + group_id, + circuit_id_round + ); } - }; - if !missing_ids.is_empty() { - anyhow::bail!("Circuit IDs for round {round} are missing: {missing_ids:?}"); - } - if circuit_ids.len() != unique_circuit_ids.len() { - anyhow::bail!("Circuit IDs: {duplicates:?} should be unique for round {round}.",); - } - if !not_in_range.is_empty() { - anyhow::bail!("Aggregation round {round} should only contain {expected_circuits_description}. Ids out of range: {not_in_range:?}"); + provided_circuit_ids.insert(circuit_id_round.clone()); } } + // All the circuit IDs should have been removed from the expected set. + if !expected_circuit_ids.is_empty() { + anyhow::bail!( + "Some circuit ids are missing from the groups: {:?}", + expected_circuit_ids + ); + } + Ok(()) } } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 21e2ea8b21de..cea147deccf8 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7974,6 +7974,7 @@ dependencies = [ "shivini", "tokio", "tracing", + "tracing-subscriber", "vise", "zkevm_test_harness", "zksync_config", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 403314cc13ca..251b3b0fb082 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -52,7 +52,7 @@ tempfile = "3" tokio = "1" toml_edit = "0.14.4" tracing = "0.1" -tracing-subscriber = { version = "0.3" } +tracing-subscriber = "0.3" vise = "0.2.0" # Proving dependencies diff --git a/prover/crates/bin/prover_fri/Cargo.toml b/prover/crates/bin/prover_fri/Cargo.toml index ae7853427e96..e41244cecbf7 100644 --- a/prover/crates/bin/prover_fri/Cargo.toml +++ b/prover/crates/bin/prover_fri/Cargo.toml @@ -43,6 +43,9 @@ reqwest = { workspace = true, features = ["blocking"] } regex.workspace = true clap = { workspace = true, features = ["derive"] } +[dev-dependencies] +tracing-subscriber.workspace = true + [features] default = [] gpu = ["shivini", "zksync_prover_keystore/gpu"] diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index 240251df15bf..be28f2bd97ee 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -8,8 +8,8 @@ pub mod gpu_prover { ProverContextConfig, }; use tokio::task::JoinHandle; - use zksync_config::configs::{fri_prover_group::FriProverGroupConfig, FriProverConfig}; - use zksync_env_config::FromEnv; + use zksync_config::configs::fri_prover::SetupLoadMode as SetupLoadModeConfig; + use zksync_config::configs::FriProverConfig; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, ProverDal}; use zksync_prover_fri_types::{ @@ -341,38 +341,84 @@ pub mod gpu_prover { } } - pub fn load_setup_data_cache( + #[tracing::instrument(skip_all, fields(setup_load_mode = ?setup_load_mode, specialized_group_id = %specialized_group_id))] + pub async fn load_setup_data_cache( keystore: &Keystore, - config: &FriProverConfig, + setup_load_mode: SetupLoadModeConfig, + specialized_group_id: u8, + circuit_ids: &[CircuitIdRoundTuple], ) -> anyhow::Result<SetupLoadMode> { - Ok(match config.setup_load_mode { - zksync_config::configs::fri_prover::SetupLoadMode::FromDisk => SetupLoadMode::FromDisk, - zksync_config::configs::fri_prover::SetupLoadMode::FromMemory => { + Ok(match setup_load_mode { + SetupLoadModeConfig::FromDisk => SetupLoadMode::FromDisk, + SetupLoadModeConfig::FromMemory => { + anyhow::ensure!( + !circuit_ids.is_empty(),
+ "Circuit IDs must be provided when using FromMemory mode" + ); let mut cache = HashMap::new(); tracing::info!( "Loading setup data cache for group {}", - &config.specialized_group_id + &specialized_group_id ); - let prover_setup_metadata_list = FriProverGroupConfig::from_env() - .context("FriProverGroupConfig::from_env()")? - .get_circuit_ids_for_group_id(config.specialized_group_id) - .context( - "At least one circuit should be configured for group when running in FromMemory mode", - )?; tracing::info!( "for group {} configured setup metadata are {:?}", - &config.specialized_group_id, - prover_setup_metadata_list + &specialized_group_id, + circuit_ids ); - for prover_setup_metadata in prover_setup_metadata_list { - let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); - let setup_data = keystore - .load_gpu_setup_data_for_circuit_type(key.clone()) - .context("load_gpu_setup_data_for_circuit_type()")?; - cache.insert(key, Arc::new(setup_data)); + // Load each file in parallel. Note that FS access is not necessarily parallel, but + // deserialization is (and it's not insignificant, as setup keys are large). + // Note: `collect` is important, because iterators are lazy and otherwise we won't actually + // spawn threads. + let handles: Vec<_> = circuit_ids + .into_iter() + .map(|prover_setup_metadata| { + let keystore = keystore.clone(); + let prover_setup_metadata = prover_setup_metadata.clone(); + tokio::task::spawn_blocking(move || { + let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); + let setup_data = keystore + .load_gpu_setup_data_for_circuit_type(key.clone()) + .context("load_gpu_setup_data_for_circuit_type()")?; + anyhow::Ok((key, Arc::new(setup_data))) + }) + }) + .collect(); + for handle in futures::future::join_all(handles).await { + let (key, setup_data) = handle.context("Key loading future panicked")??; + cache.insert(key, setup_data); } SetupLoadMode::FromMemory(cache) } }) } + + #[cfg(test)] + mod tests { + use zksync_types::basic_fri_types::AggregationRound; + + use super::*; + + #[tokio::test] + async fn test_load_setup_data_cache() { + tracing_subscriber::fmt::try_init().ok(); + + let keystore = Keystore::locate(); + let mode = SetupLoadModeConfig::FromMemory; + let specialized_group_id = 0; + let ids: Vec<_> = AggregationRound::ALL_ROUNDS + .into_iter() + .flat_map(|r| r.circuit_ids()) + .collect(); + if !keystore.is_setup_data_present(&setup_metadata_to_setup_data_key(&ids[0])) { + // We don't want this test to fail on envs where setup keys are not present. 
+ return; + } + + let start = Instant::now(); + let _cache = load_setup_data_cache(&keystore, mode, specialized_group_id, &ids) + .await + .expect("Unable to load keys"); + tracing::info!("Cache load time: {:?}", start.elapsed()); + } + } } diff --git a/prover/crates/bin/prover_fri/src/main.rs b/prover/crates/bin/prover_fri/src/main.rs index 8191653efec6..cbba8d0ddb4f 100644 --- a/prover/crates/bin/prover_fri/src/main.rs +++ b/prover/crates/bin/prover_fri/src/main.rs @@ -231,8 +231,14 @@ async fn get_prover_tasks( let keystore = Keystore::locate().with_setup_path(Some(prover_config.setup_data_path.clone().into())); - let setup_load_mode = gpu_prover::load_setup_data_cache(&keystore, &prover_config) - .context("load_setup_data_cache()")?; + let setup_load_mode = gpu_prover::load_setup_data_cache( + &keystore, + prover_config.setup_load_mode, + prover_config.specialized_group_id, + &circuit_ids_for_round_to_be_proven, + ) + .await + .context("load_setup_data_cache()")?; let witness_vector_queue = FixedSizeQueue::new(prover_config.queue_capacity); let shared_witness_vector_queue = Arc::new(Mutex::new(witness_vector_queue)); let consumer = shared_witness_vector_queue.clone(); From a5ffaf1b4e291d6f09ba8c1f224f5900665bffc4 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Thu, 12 Sep 2024 16:28:19 +0400 Subject: [PATCH 064/116] feat: Bump crypto and protocol deps (#2825) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Use latest versions of crypto, protocol, gpu, and consensus crates. - Remove solved cargo deny advisories from the allowlist. ## Why ❔ - A bunch of fixes/improvements were done. - Optimization of dependency graph. - Solving cargo deny advisories. --- Cargo.lock | 526 ++++++++++++++++++---------------------- Cargo.toml | 34 +-- deny.toml | 2 - prover/Cargo.lock | 545 +++++++++++++++++------------------------- prover/Cargo.toml | 10 +- zk_toolbox/Cargo.lock | 54 +---- zk_toolbox/Cargo.toml | 6 +- 7 files changed, 479 insertions(+), 698 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0485417df8f6..9d7b19b424bc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -44,7 +44,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -80,7 +80,7 @@ version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "version_check", "zerocopy", @@ -282,7 +282,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "444b0228950ee6501b3568d3c93bf1176a1fdbc3b758dcd9475046d30f4dc7e8" dependencies = [ "async-lock", - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "futures-io", "futures-lite", @@ -328,7 +328,7 @@ dependencies = [ "async-signal", "async-task", "blocking", - "cfg-if 1.0.0", + "cfg-if", "event-listener 5.3.1", "futures-lite", "rustix", @@ -345,7 +345,7 @@ dependencies = [ "async-io", "async-lock", "atomic-waker", - "cfg-if 1.0.0", + "cfg-if", "futures-core", "futures-io", "rustix", @@ -534,7 +534,7 @@ checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -601,30 +601,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bellman_ce" -version = 
"0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.30", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - [[package]] name = "bigdecimal" version = "0.4.5" @@ -814,17 +790,6 @@ dependencies = [ "constant_time_eq 0.3.1", ] -[[package]] -name = "blake2s_const" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq 0.1.5", -] - [[package]] name = "blake2s_simd" version = "0.5.11" @@ -907,18 +872,17 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" +checksum = "68ec2f007ff8f90cc459f03e9f30ca1065440170f013c868823646e2e48d0234" dependencies = [ "arrayvec 0.7.4", "bincode", "blake2 0.10.6", "const_format", "convert_case 0.6.0", - "crossbeam 0.8.4", + "crossbeam", "crypto-bigint 0.5.3", - "cs_derive", "derivative", "ethereum-types", "firestorm", @@ -926,7 +890,6 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "pairing_ce", "rand 0.8.5", "rayon", "serde", @@ -934,6 +897,8 @@ dependencies = [ "sha3_ce", "smallvec", "unroll", + "zksync_cs_derive", + "zksync_pairing", ] [[package]] @@ -1107,12 +1072,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -1131,7 +1090,7 @@ version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cipher", "cpufeatures", ] @@ -1240,82 +1199,82 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" +checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" dependencies = [ "derivative", "serde", - "zk_evm 0.150.4", - "zkevm_circuits 0.150.4", + "zk_evm 0.150.5", + "zkevm_circuits 0.150.5", ] [[package]] name = "circuit_sequencer_api" -version = "0.133.0" +version = "0.133.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1" +checksum = "eb959b1f8c6bbd8be711994d182e85452a26a5d2213a709290b71c8262af1331" dependencies = [ - "bellman_ce", "derivative", "rayon", "serde", "zk_evm 0.133.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.140.0" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0" +checksum = "fa5f22311ce609d852d7d9f4943535ea4610aeb785129ae6ff83d5201c4fb387" dependencies = [ - "bellman_ce", "circuit_encodings 0.140.1", "derivative", "rayon", "serde", "zk_evm 
0.140.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55a257b31a8ea1c1723cab4fb5661c6b4c0ebe022d4b73bea9eb7c9150bd3bc1" +checksum = "4c47c71d6ba83a8beb0af13af70beffd627f5497caf3d44c6f96363e788b07ea" dependencies = [ - "bellman_ce", "circuit_encodings 0.141.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.142.0" +version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497" +checksum = "e264723359e6a1aad98110bdccf1ae3ad596e93e7d31da9e40f6adc07e4add54" dependencies = [ - "bellman_ce", "circuit_encodings 0.142.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" +checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" dependencies = [ - "bellman_ce", - "circuit_encodings 0.150.4", + "circuit_encodings 0.150.5", "derivative", "rayon", "serde", + "zksync_bellman", ] [[package]] @@ -1451,7 +1410,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1565,7 +1524,7 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1604,41 +1563,17 @@ dependencies = [ "itertools 0.10.5", ] -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", -] - [[package]] name = "crossbeam" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-deque 0.8.5", - "crossbeam-epoch 0.9.18", - "crossbeam-queue 0.3.11", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] @@ -1647,18 +1582,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 
0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1667,23 +1591,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -1692,18 +1601,7 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1712,18 +1610,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] @@ -1830,7 +1717,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1962,7 +1849,7 @@ version = "5.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "hashbrown 0.14.5", "lock_api", "once_cell", @@ -2241,7 +2128,7 @@ version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -2323,7 +2210,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "home", "windows-sys 0.48.0", ] @@ -2436,27 +2323,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" dependencies = [ "byteorder", - "ff_derive_ce", "hex", "rand 0.4.6", "serde", ] -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.6", - "num-integer", - "num-traits", - "proc-macro2 1.0.86", - "quote 1.0.36", - "serde", - "syn 1.0.109", -] - [[package]] name = "fiat-crypto" version 
= "0.2.3" @@ -2572,7 +2443,7 @@ version = "15.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "878babb0b136e731cc77ec2fd883ff02745ff21e6fb662729953d44923df009c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", ] @@ -2583,7 +2454,7 @@ version = "16.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87cf1549fba25a6fcac22785b61698317d958e96cac72a59102ea45b9ae64692" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "parity-scale-codec", "scale-info", "serde", @@ -2591,17 +2462,18 @@ dependencies = [ [[package]] name = "franklin-crypto" -version = "0.1.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "178bca54fc449a6f4cb45321ed9d769353143ac7ef314ea310f3a0c61bed2da2" +checksum = "971289216ea5c91872e5e0bb6989214b537bbce375d09fabea5c3ccfe031b204" dependencies = [ "arr_macro", - "bellman_ce", "bit-vec", "blake2 0.9.2", "blake2-rfc_bellman_edition", "blake2s_simd", + "boojum", "byteorder", + "derivative", "digest 0.9.0", "hex", "indexmap 1.9.3", @@ -2618,6 +2490,7 @@ dependencies = [ "smallvec", "splitmut", "tiny-keccak 1.5.0", + "zksync_bellman", ] [[package]] @@ -2810,7 +2683,7 @@ version = "0.2.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -3564,7 +3437,7 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -3930,7 +3803,7 @@ version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", "sha2 0.10.8", @@ -3942,7 +3815,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", @@ -3992,7 +3865,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] @@ -4229,19 +4102,13 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.7", ] @@ -4251,15 +4118,6 @@ version = "2.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "merkle_tree_consistency_checker" version = "0.1.0" @@ -4332,8 +4190,8 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-utils 0.8.20", + "crossbeam-channel", + "crossbeam-utils", "dashmap", "skeptic", "smallvec", @@ -4422,7 +4280,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "libc", ] @@ -4688,7 +4546,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -4867,19 +4725,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "pairing_ce" -version = "0.28.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -4928,7 +4773,7 @@ version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall", "smallvec", @@ -5150,7 +4995,7 @@ version = "3.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc2790cd301dec6cd3b7a025e4815cf825724a51c98dccfe6a3e55f05ffb6511" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "concurrent-queue", "hermit-abi 0.4.0", "pin-project-lite", @@ -5176,7 +5021,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "opaque-debug", "universal-hash", @@ -5493,7 +5338,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20afe714292d5e879d8b12740aa223c6a88f118af41870e8b6196e39a02238a8" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", "libc", "mach", "once_cell", @@ -5634,8 +5479,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.5", - "crossbeam-utils 0.8.20", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -5811,15 +5656,18 @@ dependencies = [ [[package]] name = "rescue_poseidon" -version = "0.4.1" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ada2124f92cf32b813e50f6f7d9e92f05addc321edb8b68f9b4e2bb6e0d5af8b" +checksum = "82900c877a0ba5362ac5756efbd82c5b795dc509011c1253e2389d8708f1389d" dependencies = [ "addchain", "arrayvec 0.7.4", "blake2 0.10.6", "byteorder", + "derivative", "franklin-crypto", + "lazy_static", + "log", "num-bigint 0.3.3", "num-integer", "num-iter", @@ -5828,6 +5676,7 @@ dependencies = [ "serde", "sha3 0.9.1", "smallvec", + "typemap_rev", ] [[package]] @@ -6243,7 +6092,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", - "cfg-if 1.0.0", + "cfg-if", "derive_more 0.99.18", "parity-scale-codec", "scale-info-derive", @@ -6567,9 +6416,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cff085d2cb684faa248efb494c39b68e522822ac0de72ccf08109abde717cfb2" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -6595,9 +6444,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.208" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24008e81ff7613ed8e5ba0cfaf24e2c2f1e5b8a0495711e44fcd4882fca62bcf" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.86", "quote 1.0.36", @@ -6682,7 +6531,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -6694,7 +6543,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -6706,7 +6555,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -6718,7 +6567,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -6729,7 +6578,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -6914,7 +6763,7 @@ dependencies = [ "blake2-rfc", "bs58", "chacha20", - "crossbeam-queue 0.3.11", + "crossbeam-queue", "derive_more 0.99.18", "ed25519-zebra", "either", @@ -7153,7 +7002,7 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.11", + "crossbeam-queue", "either", "event-listener 5.3.1", "futures-channel", @@ -7662,7 +7511,7 @@ version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "rustix", "windows-sys 0.52.0", @@ -7759,7 +7608,7 @@ version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -8232,11 +8081,17 @@ version = "1.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.7", 
"static_assertions", ] +[[package]] +name = "typemap_rev" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74b08b0c1257381af16a5c3605254d529d3e7e109f3c62befc5d168968192998" + [[package]] name = "typenum" version = "1.17.0" @@ -8509,8 +8364,8 @@ dependencies = [ "enum_dispatch", "eravm-stable-interface", "primitive-types", - "zk_evm_abstractions 0.150.4", - "zkevm_opcode_defs 0.150.4", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -8556,7 +8411,7 @@ version = "0.2.87" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -8581,7 +8436,7 @@ version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -8989,7 +8844,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -8999,7 +8854,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -9153,9 +9008,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" +checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" dependencies = [ "anyhow", "lazy_static", @@ -9163,7 +9018,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.4", + "zk_evm_abstractions 0.150.5", ] [[package]] @@ -9194,15 +9049,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" +checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -9251,13 +9106,12 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" +checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" dependencies = [ "arrayvec 0.7.4", "boojum", - "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -9266,7 +9120,8 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", + "zksync_cs_derive", ] [[package]] @@ -9313,9 +9168,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" +checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" dependencies = 
[ "bitflags 2.6.0", "blake2 0.10.6", @@ -9370,6 +9225,29 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_bellman" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffa03efe9bdb137a4b36b97d1a74237e18c9ae42b755163d903a9d48c1a5d80" +dependencies = [ + "arrayvec 0.7.4", + "bit-vec", + "blake2s_simd", + "byteorder", + "cfg-if", + "crossbeam", + "futures 0.3.30", + "hex", + "lazy_static", + "num_cpus", + "rand 0.4.6", + "serde", + "smallvec", + "tiny-keccak 1.5.0", + "zksync_pairing", +] + [[package]] name = "zksync_block_reverter" version = "0.1.0" @@ -9413,9 +9291,9 @@ name = "zksync_commitment_generator" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.150.5", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9427,7 +9305,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -9443,9 +9321,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", @@ -9480,9 +9358,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1dcab481683131c093271c19602bd495b1d682f7a94f764f2227111a0a104f0" +checksum = "45c409ae915056cf9cadd9304dbc8718fa38edfcb346d06e5b3582dcd2489ef9" dependencies = [ "anyhow", "async-trait", @@ -9502,9 +9380,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" dependencies = [ "anyhow", "blst", @@ -9515,7 +9393,6 @@ dependencies = [ "k256 0.13.3", "num-bigint 0.4.6", "num-traits", - "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -9526,9 +9403,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "216e3d9f3df8c119e037e44c41db12fa6448dafbf1eaf5015d13b22400866980" +checksum = "6b018b8a76fc2cbecb51683ce97532501c45d44cbc8bb856d1956e5998259335" dependencies = [ "anyhow", "async-trait", @@ -9548,9 +9425,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19d7dd832b1bbcd0a2b977b2d85986437105fd5e1e82bd4becb2e6a9db112655" +checksum = "f5bb2988e41af3083cebfc11f47f2615adae8d829bf9237aa084dede9629a687" dependencies = [ "anyhow", "async-trait", @@ -9584,9 +9461,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" +checksum = 
"0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" dependencies = [ "anyhow", "bit-vec", @@ -9606,9 +9483,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" +checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" dependencies = [ "anyhow", "async-trait", @@ -9626,9 +9503,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", "rand 0.8.5", @@ -9768,6 +9645,18 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_cs_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "zksync_da_client" version = "0.1.0" @@ -10060,6 +9949,34 @@ dependencies = [ "zksync_prover_interface", ] +[[package]] +name = "zksync_ff" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9524b06780b5e164e84b38840c7c428c739f051f35af6efc4d1285f629ceb88e" +dependencies = [ + "byteorder", + "hex", + "rand 0.4.6", + "serde", + "zksync_ff_derive", +] + +[[package]] +name = "zksync_ff_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" +dependencies = [ + "num-bigint 0.4.6", + "num-integer", + "num-traits", + "proc-macro2 1.0.86", + "quote 1.0.36", + "serde", + "syn 1.0.109", +] + [[package]] name = "zksync_health_check" version = "0.1.0" @@ -10092,9 +10009,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" +checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" dependencies = [ "boojum", "derivative", @@ -10104,7 +10021,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.4", + "zkevm_circuits 0.150.5", ] [[package]] @@ -10225,11 +10142,11 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "circuit_sequencer_api 0.133.0", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.133.1", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.142.2", + "circuit_sequencer_api 0.150.5", "ethabi", "hex", "itertools 0.10.5", @@ -10244,7 +10161,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_eth_signer", "zksync_system_constants", @@ -10577,6 +10494,19 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_pairing" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"c8412ae5574472fa567a097e183f9a01974b99dd0b5da3bfa1bbe6c57c579aa2" +dependencies = [ + "byteorder", + "cfg-if", + "rand 0.4.6", + "serde", + "zksync_ff", +] + [[package]] name = "zksync_proof_data_handler" version = "0.1.0" @@ -10602,9 +10532,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -10623,9 +10553,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck 0.5.0", @@ -10663,7 +10593,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "serde", "serde_json", "serde_with", @@ -10774,9 +10704,9 @@ dependencies = [ [[package]] name = "zksync_solidity_vk_codegen" -version = "0.1.0" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bac71750012656b207e8cdb67415823318909077d8c8e235111f0d2feeeeeda" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" dependencies = [ "ethereum-types", "franklin-crypto", diff --git a/Cargo.toml b/Cargo.toml index 145b72446b48..6ee04692d8c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -215,30 +215,30 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.4" } -crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.1.0" } -kzg = { package = "zksync_kzg", version = "=0.150.4" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } +kzg = { package = "zksync_kzg", version = "=0.150.5" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } -zk_evm_1_3_3 = { package = "zk_evm", version = "0.133.0" } -zk_evm_1_4_0 = { package = "zk_evm", version = "0.140.0" } -zk_evm_1_4_1 = { package = "zk_evm", version = "0.141.0" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.4" } +zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } +zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } +zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } # Consensus dependencies. 
-zksync_concurrency = "=0.1.0-rc.12" -zksync_consensus_bft = "=0.1.0-rc.12" -zksync_consensus_crypto = "=0.1.0-rc.12" -zksync_consensus_executor = "=0.1.0-rc.12" -zksync_consensus_network = "=0.1.0-rc.12" -zksync_consensus_roles = "=0.1.0-rc.12" -zksync_consensus_storage = "=0.1.0-rc.12" -zksync_consensus_utils = "=0.1.0-rc.12" -zksync_protobuf = "=0.1.0-rc.12" -zksync_protobuf_build = "=0.1.0-rc.12" +zksync_concurrency = "=0.1.1" +zksync_consensus_bft = "=0.1.1" +zksync_consensus_crypto = "=0.1.1" +zksync_consensus_executor = "=0.1.1" +zksync_consensus_network = "=0.1.1" +zksync_consensus_roles = "=0.1.1" +zksync_consensus_storage = "=0.1.1" +zksync_consensus_utils = "=0.1.1" +zksync_protobuf = "=0.1.1" +zksync_protobuf_build = "=0.1.1" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/deny.toml b/deny.toml index 83a8709a69c6..c2775fc057c8 100644 --- a/deny.toml +++ b/deny.toml @@ -8,8 +8,6 @@ feature-depth = 1 [advisories] ignore = [ - "RUSTSEC-2023-0045", # memoffset vulnerability, dependency coming from bellman_ce - "RUSTSEC-2022-0041", # crossbeam-utils vulnerability, dependency coming from bellman_ce "RUSTSEC-2024-0320", # yaml_rust dependency being unmaintained, dependency in core, we should consider moving to yaml_rust2 fork "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error` diff --git a/prover/Cargo.lock b/prover/Cargo.lock index cea147deccf8..17f27737aa21 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -45,7 +45,7 @@ version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", "version_check", "zerocopy", @@ -355,7 +355,7 @@ checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", - "cfg-if 1.0.0", + "cfg-if", "libc", "miniz_oxide", "object", @@ -407,54 +407,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bellman_ce" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea340d5c1394ee4daf4415dd80e06f74e0ad9b08e21f73f6bb1fa3a9dfae80d" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const 0.7.0", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.30", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - -[[package]] -name = "bellman_ce" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aab6627603565b664e6c643a1dc7ea8bbff25b776f5fecd80ac88308fc7007b" -dependencies = [ - "arrayvec 0.7.4", - "bit-vec", - "blake2s_const 0.8.0", - "blake2s_simd", - "byteorder", - "cfg-if 1.0.0", - "crossbeam 0.7.3", - "futures 0.3.30", - "hex", - "lazy_static", - "num_cpus", - "pairing_ce", - "rand 0.4.6", - "serde", - "smallvec", - "tiny-keccak 1.5.0", -] - [[package]] name = "bigdecimal" version = "0.4.5" @@ -608,28 +560,6 @@ dependencies = [ "digest 0.10.7", ] -[[package]] -name = "blake2s_const" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f39d933cb38939f885001867874c65699c36f30f0c78aae9f4c9f01b3e4b306a" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - -[[package]] 
-name = "blake2s_const" -version = "0.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1db04f0f5f88d8c95977159949b23d2ed24d33309901cf7f7e48ed40f36de667" -dependencies = [ - "arrayref", - "arrayvec 0.5.2", - "constant_time_eq", -] - [[package]] name = "blake2s_simd" version = "0.5.11" @@ -680,18 +610,17 @@ dependencies = [ [[package]] name = "boojum" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df88daa33db46d683967ca09a4f04817c38950483f2501a771d497669a8a4bb1" +checksum = "68ec2f007ff8f90cc459f03e9f30ca1065440170f013c868823646e2e48d0234" dependencies = [ "arrayvec 0.7.4", "bincode", "blake2 0.10.6", "const_format", "convert_case", - "crossbeam 0.8.4", + "crossbeam", "crypto-bigint 0.5.5", - "cs_derive", "derivative", "ethereum-types", "firestorm", @@ -699,7 +628,6 @@ dependencies = [ "lazy_static", "num-modular", "num_cpus", - "pairing_ce", "rand 0.8.5", "rayon", "serde", @@ -708,13 +636,15 @@ dependencies = [ "smallvec", "tracing", "unroll", + "zksync_cs_derive", + "zksync_pairing", ] [[package]] name = "boojum-cuda" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "252c28bc729eb32a053de0cbd1c8c55b2f51d00ca0c656f30bc70d255c2d8753" +checksum = "ac7735446f2263e8d12435fc4d5a02c7727838eaffc7c518a961b3e839fb59e7" dependencies = [ "boojum", "cmake", @@ -831,12 +761,6 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" @@ -866,12 +790,12 @@ dependencies = [ [[package]] name = "circuit_definitions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffaa17c1585fbf010b9340bb1fd7f4c4eedec2c15cb74a72162fd2d16435d55" +checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" dependencies = [ - "circuit_encodings 0.150.4", - "crossbeam 0.8.4", + "circuit_encodings 0.150.5", + "crossbeam", "derivative", "seq-macro", "serde", @@ -916,82 +840,82 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2593c02ad6b4b31ba63506c3f807f666133dd36bf47422f99b1d2947cf3c8dc1" +checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" dependencies = [ "derivative", "serde", - "zk_evm 0.150.4", - "zkevm_circuits 0.150.4", + "zk_evm 0.150.5", + "zkevm_circuits 0.150.5", ] [[package]] name = "circuit_sequencer_api" -version = "0.133.0" +version = "0.133.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a87dc7bee6630d4954ac7982eb77e2007476662250cf18e5c460bbc5ee435f1" +checksum = "eb959b1f8c6bbd8be711994d182e85452a26a5d2213a709290b71c8262af1331" dependencies = [ - "bellman_ce 0.7.0", "derivative", "rayon", "serde", "zk_evm 0.133.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.140.0" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b5138e6524c73e6d49fc1d0822b26e62a8d78b2c07e4e1c56061a447c10bec0" +checksum = "fa5f22311ce609d852d7d9f4943535ea4610aeb785129ae6ff83d5201c4fb387" dependencies = [ - "bellman_ce 0.7.0", "circuit_encodings 0.140.1", "derivative", "rayon", "serde", "zk_evm 0.140.0", + 
"zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55a257b31a8ea1c1723cab4fb5661c6b4c0ebe022d4b73bea9eb7c9150bd3bc1" +checksum = "4c47c71d6ba83a8beb0af13af70beffd627f5497caf3d44c6f96363e788b07ea" dependencies = [ - "bellman_ce 0.8.0", "circuit_encodings 0.141.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.142.0" +version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1d861a7a9b8df9389c63092985fc993c46954771da86462d7cab8cbf55a6497" +checksum = "e264723359e6a1aad98110bdccf1ae3ad596e93e7d31da9e40f6adc07e4add54" dependencies = [ - "bellman_ce 0.7.0", "circuit_encodings 0.142.1", "derivative", "rayon", "serde", "zk_evm 0.141.0", + "zksync_bellman", ] [[package]] name = "circuit_sequencer_api" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42d1a86b9c2207f3bb2dff5f00d1af1cb95004b6d07e9bacb6519fe08f12c04b" +checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" dependencies = [ - "bellman_ce 0.7.0", - "circuit_encodings 0.150.4", + "circuit_encodings 0.150.5", "derivative", "rayon", "serde", + "zksync_bellman", ] [[package]] @@ -1116,7 +1040,7 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ - "crossbeam-utils 0.8.20", + "crossbeam-utils", ] [[package]] @@ -1225,21 +1149,7 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "crossbeam" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69323bff1fb41c635347b8ead484a5ca6c3f11914d784170b158d8449ab07f8e" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-channel 0.4.4", - "crossbeam-deque 0.7.4", - "crossbeam-epoch 0.8.2", - "crossbeam-queue 0.2.3", - "crossbeam-utils 0.7.2", + "cfg-if", ] [[package]] @@ -1248,21 +1158,11 @@ version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1137cd7e7fc0fb5d3c5a8678be38ec56e819125d8d7907411fe24ccb943faca8" dependencies = [ - "crossbeam-channel 0.5.13", - "crossbeam-deque 0.8.5", - "crossbeam-epoch 0.9.18", - "crossbeam-queue 0.3.11", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-channel" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b153fe7cbef478c567df0f972e02e6d736db11affe43dfc9c56a9374d1adfb87" -dependencies = [ - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-channel", + "crossbeam-deque", + "crossbeam-epoch", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] @@ -1271,18 +1171,7 @@ version = "0.5.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33480d6946193aa8033910124896ca395333cae7e2d1113d1fef6c3272217df2" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-deque" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20ff29ded3204c5106278a81a38f4b482636ed4fa1e6cfbeef193291beb29ed" -dependencies = [ - "crossbeam-epoch 0.8.2", - "crossbeam-utils 0.7.2", - "maybe-uninit", + 
"crossbeam-utils", ] [[package]] @@ -1291,23 +1180,8 @@ version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613f8cc01fe9cf1a3eb3d7f488fd2fa8388403e97039e2f73692932e291a770d" dependencies = [ - "crossbeam-epoch 0.9.18", - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "lazy_static", - "maybe-uninit", - "memoffset", - "scopeguard", + "crossbeam-epoch", + "crossbeam-utils", ] [[package]] @@ -1316,18 +1190,7 @@ version = "0.9.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b82ac4a3c2ca9c3460964f020e1402edd5753411d7737aa39c3714ad1b5420e" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-queue" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "774ba60a54c213d409d5353bda12d49cd68d14e45036a285234c8d6f91f92570" -dependencies = [ - "cfg-if 0.1.10", - "crossbeam-utils 0.7.2", - "maybe-uninit", + "crossbeam-utils", ] [[package]] @@ -1336,18 +1199,7 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df0346b5d5e76ac2fe4e327c5fd1118d6be7c51dfb18f9b7922923f287471e35" dependencies = [ - "crossbeam-utils 0.8.20", -] - -[[package]] -name = "crossbeam-utils" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" -dependencies = [ - "autocfg", - "cfg-if 0.1.10", - "lazy_static", + "crossbeam-utils", ] [[package]] @@ -1444,7 +1296,7 @@ version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", @@ -1764,7 +1616,7 @@ version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1830,9 +1682,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "803be147b389086e33254a6c9fe26a0d1d21a11f9f73181cad06cf5b1beb7d16" +checksum = "f76aa50bd291b43ad56fb7da3e63c4c3cecb3c7e19db76c8097856371bc0d84a" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1841,9 +1693,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f9a3d87f3d45d11bc835e5fc78fe6e3fe243355d435f6b3e794b98df7d3323" +checksum = "e7d2db304df6b72141d45b140ec6df68ecd2300a7ab27de18b3e0e3af38c9776" dependencies = [ "serde_json", ] @@ -1872,7 +1724,7 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "136d1b5283a1ab77bd9257427ffd09d8667ced0570b6f938942bc7568ed5b943" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "home", "windows-sys 0.48.0", ] @@ -1965,27 +1817,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" dependencies = [ "byteorder", - "ff_derive_ce", "hex", "rand 0.4.6", "serde", ] -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint 0.4.5", - "num-integer", - "num-traits", - "proc-macro2 1.0.85", - "quote 1.0.36", - "serde", - "syn 1.0.109", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -2081,12 +1917,11 @@ dependencies = [ [[package]] name = "franklin-crypto" -version = "0.2.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05eab544ba915269919b5f158a061b540a4e3a04150c1346481f4f7b80eb6311" +checksum = "971289216ea5c91872e5e0bb6989214b537bbce375d09fabea5c3ccfe031b204" dependencies = [ "arr_macro", - "bellman_ce 0.8.0", "bit-vec", "blake2 0.9.2", "blake2-rfc_bellman_edition", @@ -2110,6 +1945,7 @@ dependencies = [ "smallvec", "splitmut", "tiny-keccak 1.5.0", + "zksync_bellman", ] [[package]] @@ -2276,7 +2112,7 @@ version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "libc", "wasi", @@ -3162,7 +2998,7 @@ version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.14.8", "elliptic-curve 0.12.3", "sha2 0.10.8", @@ -3174,7 +3010,7 @@ version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "ecdsa 0.16.9", "elliptic-curve 0.13.8", "once_cell", @@ -3218,7 +3054,7 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-targets 0.52.5", ] @@ -3326,19 +3162,13 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" -[[package]] -name = "maybe-uninit" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" - [[package]] name = "md-5" version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d89e7ee0cfbedfc4da3340218492196241d89eefb6dab27de5df917a6d2e78cf" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "digest 0.10.7", ] @@ -3354,15 +3184,6 @@ version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" -[[package]] -name = "memoffset" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "043175f069eda7b85febe4a74abbaeff828d9f8b448515d3151a14a3542811aa" -dependencies = [ - "autocfg", -] - [[package]] name = "miette" version = "5.10.0" @@ -3489,7 +3310,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "cfg_aliases", 
"libc", ] @@ -3738,7 +3559,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9529f4786b70a3e8c61e11179af17ab6188ad8d0ded78c5529441ed39d4bd9c1" dependencies = [ "bitflags 2.6.0", - "cfg-if 1.0.0", + "cfg-if", "foreign-types", "libc", "once_cell", @@ -3911,19 +3732,6 @@ dependencies = [ "sha2 0.10.8", ] -[[package]] -name = "pairing_ce" -version = "0.28.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" -dependencies = [ - "byteorder", - "cfg-if 1.0.0", - "ff_ce", - "rand 0.4.6", - "serde", -] - [[package]] name = "parity-scale-codec" version = "3.6.11" @@ -3972,7 +3780,7 @@ version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "redox_syscall 0.5.1", "smallvec", @@ -4596,8 +4404,8 @@ version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1465873a3dfdaa8ae7cb14b4383657caab0b3e8a0aa9ae8e04b044854c8dfce2" dependencies = [ - "crossbeam-deque 0.8.5", - "crossbeam-utils 0.8.20", + "crossbeam-deque", + "crossbeam-utils", ] [[package]] @@ -4784,9 +4592,9 @@ dependencies = [ [[package]] name = "rescue_poseidon" -version = "0.5.2" +version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27fbc6ba44baf99a0ca8387b1fa1cf90d3d7062860c1afedbbb64454829acc5" +checksum = "82900c877a0ba5362ac5756efbd82c5b795dc509011c1253e2389d8708f1389d" dependencies = [ "addchain", "arrayvec 0.7.4", @@ -4835,7 +4643,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", - "cfg-if 1.0.0", + "cfg-if", "getrandom", "libc", "spin", @@ -5406,7 +5214,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5418,7 +5226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -5430,7 +5238,7 @@ version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5441,7 +5249,7 @@ version = "0.10.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eca2daa77078f4ddff27e75c4bf59e4c2697525f56dbb3c842d34a5d1f2b04a2" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.10.7", ] @@ -5495,9 +5303,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "331868b8d92ffec8887c17e786632cf0c9bd4750986fc1400a6d1fbf3739cba4" +checksum = "3f11e6942c89861aecb72261f8220800a1b69b8a5463c07c24df75b81fd809b0" dependencies = [ "bincode", "blake2 0.10.6", @@ -5587,9 +5395,9 @@ dependencies = [ [[package]] name = "snark_wrapper" -version = "0.1.2" +version = "0.30.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "71aa5bffe5e7daca634bf2fedf0bf566273cb7eae01711d1aa6e5223d36d987d" +checksum = "0b5dfdc3eed51d79541adff827593743750fe6626a65006814f8cfa4273371de" dependencies = [ "derivative", "rand 0.4.6", @@ -5692,7 +5500,7 @@ dependencies = [ "bytes", "chrono", "crc", - "crossbeam-queue 0.3.11", + "crossbeam-queue", "either", "event-listener", "futures-channel", @@ -6058,7 +5866,7 @@ version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "fastrand", "rustix", "windows-sys 0.52.0", @@ -6136,7 +5944,7 @@ version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "once_cell", ] @@ -6744,8 +6552,8 @@ dependencies = [ "enum_dispatch", "eravm-stable-interface", "primitive-types", - "zk_evm_abstractions 0.150.4", - "zkevm_opcode_defs 0.150.4", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -6794,7 +6602,7 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] @@ -6819,7 +6627,7 @@ version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -7112,7 +6920,7 @@ version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7122,7 +6930,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "windows-sys 0.48.0", ] @@ -7249,9 +7057,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2dbb0ed38d61fbd04bd7575755924d1303e129c04c909abba7f5bfcc6260bcf" +checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" dependencies = [ "anyhow", "lazy_static", @@ -7259,7 +7067,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.4", + "zk_evm_abstractions 0.150.5", ] [[package]] @@ -7290,22 +7098,22 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31460aacfe65b39ac484a2a2e0bbb02baf141f65264bf48e1e4f59ab375fe933" +checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] name = "zkevm-assembly" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b69d09d125b94767847c4cdc4ae399654b9e2a2f9304bd8935a7033bef4b07c" +checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" dependencies = 
[ "env_logger 0.9.3", "hex", @@ -7318,7 +7126,7 @@ dependencies = [ "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", ] [[package]] @@ -7367,13 +7175,12 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abdfaa95dfe0878fda219dd17a6cc8c28711e2067785910c0e06d3ffdca78629" +checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" dependencies = [ "arrayvec 0.7.4", "boojum", - "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -7382,7 +7189,8 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.4", + "zkevm_opcode_defs 0.150.5", + "zksync_cs_derive", ] [[package]] @@ -7429,9 +7237,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb7c5c7b4481a646f8696b08cee64a8dec097509a6378d18242f81022f327f1e" +checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7446,15 +7254,15 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9416dc5fcf7bc403d4c24d37f0e9a492a81926ff0e89a7792dc8a29de69aec1b" +checksum = "550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "codegen", - "crossbeam 0.8.4", + "crossbeam", "derivative", "env_logger 0.9.3", "hex", @@ -7473,13 +7281,13 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae694dc0ad818e4d45af70b2cf579ff46f1ac938b42ee55543529beb45ba1464" +checksum = "aecd7f624185b785e9d8457986ac34685d478e2baa78417d51b102b7d0fa27fd" dependencies = [ "bindgen 0.59.2", "cmake", - "crossbeam 0.8.4", + "crossbeam", "derivative", "era_cudart_sys", "futures 0.3.30", @@ -7489,13 +7297,13 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8156dbaf36764409cc93424d43dc86c993601d73f5aa9a5938e6552a14dc2df" +checksum = "a089b11fcdbd37065acaf427545cb50b87e6712951a10f3761b3d370e4b8f9bc" dependencies = [ "bit-vec", - "cfg-if 1.0.0", - "crossbeam 0.8.4", + "cfg-if", + "crossbeam", "franklin-crypto", "itertools 0.10.5", "num_cpus", @@ -7506,9 +7314,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.6" +version = "0.150.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83975189451bfacfa97dbcce899fde9db15a0c072196a9b92ddfabbe756bab9d" +checksum = "dc764c21d4ae15c5bc2c07c14c814c5e3ba8d194ddcca543b8cec95456031832" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7533,11 +7341,34 @@ dependencies = [ "url", ] +[[package]] +name = "zksync_bellman" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ffa03efe9bdb137a4b36b97d1a74237e18c9ae42b755163d903a9d48c1a5d80" +dependencies = [ + "arrayvec 0.7.4", + "bit-vec", + "blake2s_simd", + "byteorder", + "cfg-if", + "crossbeam", + "futures 0.3.30", + "hex", + "lazy_static", + "num_cpus", + "rand 
0.4.6", + "serde", + "smallvec", + "tiny-keccak 1.5.0", + "zksync_pairing", +] + [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", @@ -7571,9 +7402,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" dependencies = [ "anyhow", "blst", @@ -7584,7 +7415,6 @@ dependencies = [ "k256 0.13.3", "num-bigint 0.4.5", "num-traits", - "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", @@ -7595,9 +7425,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" +checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" dependencies = [ "anyhow", "bit-vec", @@ -7617,9 +7447,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9099b2295f550176d824b5287f2f31b7739c4d10247faec1132f1c6e9d18059c" +checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" dependencies = [ "anyhow", "async-trait", @@ -7637,9 +7467,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", "rand 0.8.5", @@ -7691,6 +7521,18 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_cs_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" +dependencies = [ + "proc-macro-error", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 1.0.109", +] + [[package]] name = "zksync_dal" version = "0.1.0" @@ -7777,11 +7619,39 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_ff" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9524b06780b5e164e84b38840c7c428c739f051f35af6efc4d1285f629ceb88e" +dependencies = [ + "byteorder", + "hex", + "rand 0.4.6", + "serde", + "zksync_ff_derive", +] + +[[package]] +name = "zksync_ff_derive" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" +dependencies = [ + "num-bigint 0.4.5", + "num-integer", + "num-traits", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde", + "syn 1.0.109", +] + [[package]] name = "zksync_kzg" -version = "0.150.4" +version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9949f48ea1a9f9a0e73242d4d1e87e681095181827486b3fcc2cf93e5aa03280" +checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" 
dependencies = [ "boojum", "derivative", @@ -7791,7 +7661,7 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.4", + "zkevm_circuits 0.150.5", ] [[package]] @@ -7808,11 +7678,11 @@ name = "zksync_multivm" version = "0.1.0" dependencies = [ "anyhow", - "circuit_sequencer_api 0.133.0", - "circuit_sequencer_api 0.140.0", - "circuit_sequencer_api 0.141.1", - "circuit_sequencer_api 0.142.0", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.133.1", + "circuit_sequencer_api 0.140.3", + "circuit_sequencer_api 0.141.2", + "circuit_sequencer_api 0.142.2", + "circuit_sequencer_api 0.150.5", "hex", "itertools 0.10.5", "once_cell", @@ -7825,7 +7695,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.4", + "zk_evm 0.150.5", "zksync_contracts", "zksync_system_constants", "zksync_types", @@ -7856,6 +7726,19 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_pairing" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8412ae5574472fa567a097e183f9a01974b99dd0b5da3bfa1bbe6c57c579aa2" +dependencies = [ + "byteorder", + "cfg-if", + "rand 0.4.6", + "serde", + "zksync_ff", +] + [[package]] name = "zksync_proof_fri_compressor" version = "0.1.0" @@ -7863,7 +7746,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "clap 4.5.4", "ctrlc", "futures 0.3.30", @@ -7892,9 +7775,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -7913,9 +7796,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck 0.5.0", @@ -8050,7 +7933,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.4", + "circuit_sequencer_api 0.150.5", "serde", "serde_with", "strum", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 251b3b0fb082..624661adc8dc 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -56,13 +56,13 @@ tracing-subscriber = "0.3" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.4" -circuit_sequencer_api = "=0.150.4" -zkevm_test_harness = "=0.150.4" +circuit_definitions = "=0.150.5" +circuit_sequencer_api = "=0.150.5" +zkevm_test_harness = "=0.150.5" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.6" } -shivini = "=0.150.6" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.7" } +shivini = "=0.150.7" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index eb16477382c2..291c24dbf846 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -1691,27 +1691,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" dependencies = [ "byteorder", - 
"ff_derive_ce", "hex", "rand 0.4.6", "serde", ] -[[package]] -name = "ff_derive_ce" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b96fbccd88dbb1fac4ee4a07c2fcc4ca719a74ffbd9d2b9d41d8c8eb073d8b20" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", - "proc-macro2", - "quote", - "serde", - "syn 1.0.109", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -3262,19 +3246,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "pairing_ce" -version = "0.28.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "843b5b6fb63f00460f611dbc87a50bbbb745f0dfe5cbf67ca89299c79098640e" -dependencies = [ - "byteorder", - "cfg-if", - "ff_ce", - "rand 0.4.6", - "serde", -] - [[package]] name = "parity-scale-codec" version = "3.6.12" @@ -6515,9 +6486,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a49ad68bfaf6fb8542c68894b68b28be31514786549855aaa8a46b36defbb100" +checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" dependencies = [ "anyhow", "once_cell", @@ -6549,9 +6520,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace39bdf50b8421c4d546381fe1ecc5212f953ce61cf93d4fa69172078dbe4af" +checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" dependencies = [ "anyhow", "blst", @@ -6562,7 +6533,6 @@ dependencies = [ "k256 0.13.3", "num-bigint", "num-traits", - "pairing_ce", "rand 0.4.6", "rand 0.8.5", "sha3", @@ -6573,9 +6543,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06277266e31efdc1465f6a27ce96c7435392a270978a91956b8a848732df2cfa" +checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" dependencies = [ "anyhow", "bit-vec", @@ -6595,9 +6565,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d624f55e2449f43b2c85588b5dd2a28b3c5ea629effc89df76e3254f8d9d2fb" +checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" dependencies = [ "anyhow", "rand 0.8.5", @@ -6646,9 +6616,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26fb2beb3aeafb5e9babf1acf6494662cc7157b893fa248bd151494f931d07f" +checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" dependencies = [ "anyhow", "bit-vec", @@ -6667,9 +6637,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.0-rc.12" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58e86c198e056d921b4f3f1d2755c23d090e942b5a70b03bcb7e7c02445aa491" +checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" dependencies = [ "anyhow", "heck", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 126c44f0eaeb..33309872ea3b 100644 --- a/zk_toolbox/Cargo.toml +++ 
b/zk_toolbox/Cargo.toml @@ -30,9 +30,9 @@ types = { path = "crates/types" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_consensus_roles = "=0.1.0-rc.12" -zksync_consensus_crypto = "=0.1.0-rc.12" -zksync_protobuf = "=0.1.0-rc.12" +zksync_consensus_roles = "=0.1.1" +zksync_consensus_crypto = "=0.1.1" +zksync_protobuf = "=0.1.1" # External dependencies anyhow = "1.0.82" From 934634b149377c730ec39e904508c40628ff4019 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 12 Sep 2024 16:30:54 +0300 Subject: [PATCH 065/116] feat(prover): Refactor WitnessGenerator (#2845) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Introduce a new module structure for the witness generators. Introduce the `ArtifactsManager` trait, responsible for operations with the object store and artifacts. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. ---
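Each witness-generator round now splits into a mod.rs (witness logic), an artifacts.rs (the round's `ArtifactsManager` impl), and a job_processor.rs (the round's `JobProcessor` impl). A condensed sketch of the trait's shape follows; `ObjectStore`, `ConnectionPool` and `BlobUrls` are simplified stand-ins here for `zksync_object_store::ObjectStore`, `ConnectionPool<Prover>` and the richer `BlobUrls` enum added in src/artifacts.rs further down, and the sketch assumes only the `async-trait` and `anyhow` crates:

use std::time::Instant;

use async_trait::async_trait;

// Simplified stand-ins (illustrative only) for the real zksync types.
pub struct ObjectStore;
pub struct ConnectionPool;
pub enum BlobUrls {
    Url(String),
}

// One impl of this trait per aggregation round keeps all object-store
// reads/writes and database bookkeeping for that round in a single place.
#[async_trait]
pub trait ArtifactsManager {
    type InputMetadata;
    type InputArtifacts;
    type OutputArtifacts;

    // Fetch the round's input artifacts from the object store.
    async fn get_artifacts(
        metadata: &Self::InputMetadata,
        object_store: &ObjectStore,
    ) -> anyhow::Result<Self::InputArtifacts>;

    // Persist the round's outputs and report where they were stored.
    async fn save_artifacts(
        job_id: u32,
        artifacts: Self::OutputArtifacts,
        object_store: &ObjectStore,
    ) -> BlobUrls;

    // Record the finished job and its blob URLs in the prover database.
    async fn update_database(
        connection_pool: &ConnectionPool,
        job_id: u32,
        started_at: Instant,
        blob_urls: BlobUrls,
        artifacts: Self::OutputArtifacts,
    ) -> anyhow::Result<()>;
}

With this shape in place, each round differs only in its three associated types and in the DAL calls made inside update_database.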
.../proof_fri_compressor/src/compressor.rs | 8 +- .../bin/witness_generator/src/artifacts.rs | 50 +++ .../src/basic_circuits/artifacts.rs | 108 ++++++ .../src/basic_circuits/job_processor.rs | 153 +++++++++ .../mod.rs} | 312 +++--------------- .../src/leaf_aggregation/artifacts.rs | 150 +++++++++ .../src/leaf_aggregation/job_processor.rs | 124 +++++++ .../mod.rs} | 265 +-------------- .../crates/bin/witness_generator/src/lib.rs | 1 + .../src/node_aggregation/artifacts.rs | 146 ++++++++ .../src/node_aggregation/job_processor.rs | 115 +++++++ .../mod.rs} | 247 +------------- .../src/recursion_tip/artifacts.rs | 141 ++++++++ .../src/recursion_tip/job_processor.rs | 130 ++++++++ .../mod.rs} | 162 +-------- .../src/scheduler/artifacts.rs | 94 ++++++ .../src/scheduler/job_processor.rs | 129 ++++++++ .../src/{scheduler.rs => scheduler/mod.rs} | 157 +-------- .../crates/bin/witness_generator/src/utils.rs | 62 +--- 19 files changed, 1440 insertions(+), 1114 deletions(-) create mode 100644 prover/crates/bin/witness_generator/src/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs rename prover/crates/bin/witness_generator/src/{basic_circuits.rs => basic_circuits/mod.rs} (63%) create mode 100644 prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs rename prover/crates/bin/witness_generator/src/{leaf_aggregation.rs => leaf_aggregation/mod.rs} (52%) create mode 100644 prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs rename prover/crates/bin/witness_generator/src/{node_aggregation.rs => node_aggregation/mod.rs} (52%) create mode 100644 prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs rename prover/crates/bin/witness_generator/src/{recursion_tip.rs => recursion_tip/mod.rs} (58%) create mode 100644 prover/crates/bin/witness_generator/src/scheduler/artifacts.rs create mode 100644 prover/crates/bin/witness_generator/src/scheduler/job_processor.rs rename prover/crates/bin/witness_generator/src/{scheduler.rs => scheduler/mod.rs} (54%) diff --git a/prover/crates/bin/proof_fri_compressor/src/compressor.rs b/prover/crates/bin/proof_fri_compressor/src/compressor.rs index 077347bce9be..e462097e38d0 100644 --- a/prover/crates/bin/proof_fri_compressor/src/compressor.rs +++ b/prover/crates/bin/proof_fri_compressor/src/compressor.rs @@ -59,7 +59,6 @@ impl ProofCompressor { #[tracing::instrument(skip(proof, _compression_mode))] pub fn compress_proof( - l1_batch: L1BatchNumber, proof: ZkSyncRecursionLayerProof, _compression_mode: u8, keystore: Keystore, @@ -171,16 +170,13 @@ impl JobProcessor for ProofCompressor { async fn process_job( &self, - job_id: &L1BatchNumber, + _job_id: &L1BatchNumber, job: ZkSyncRecursionLayerProof, _started_at: Instant, ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> { let compression_mode = self.compression_mode; - let block_number = *job_id; let keystore = self.keystore.clone(); - tokio::task::spawn_blocking(move || { - Self::compress_proof(block_number, job, compression_mode, keystore) - }) + tokio::task::spawn_blocking(move || Self::compress_proof(job, compression_mode, keystore)) } async fn save_result( diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs new file mode 100644 index 000000000000..f509d3b2f64a --- /dev/null +++ b/prover/crates/bin/witness_generator/src/artifacts.rs @@ -0,0 +1,50 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover}; + +#[derive(Debug)] +pub(crate) struct AggregationBlobUrls { + pub aggregations_urls: String, + pub circuit_ids_and_urls: Vec<(u8, String)>, +} + +#[derive(Debug)] +pub(crate) struct SchedulerBlobUrls { + pub circuit_ids_and_urls: Vec<(u8, String)>, + pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>, + pub scheduler_witness_url: String, +} + +pub(crate) enum BlobUrls { + Url(String), + Aggregation(AggregationBlobUrls), + Scheduler(SchedulerBlobUrls), +} + +#[async_trait] +pub(crate) trait ArtifactsManager { + type InputMetadata; + type InputArtifacts; + type OutputArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Self::InputArtifacts>; + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls; + + async fn update_database( + connection_pool: &ConnectionPool<Prover>, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()>; +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs new file mode 100644 index 000000000000..3447659f8296 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs @@ -0,0 +1,108 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::AuxOutputWitnessWrapper; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + utils::SchedulerPartialInputWrapper, +}; + 
+#[async_trait] +impl ArtifactsManager for BasicWitnessGenerator { + type InputMetadata = L1BatchNumber; + type InputArtifacts = BasicWitnessGeneratorJob; + type OutputArtifacts = BasicCircuitArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Self::InputArtifacts> { + let l1_batch_number = *metadata; + let data = object_store.get(l1_batch_number).await.unwrap(); + Ok(BasicWitnessGeneratorJob { + block_number: l1_batch_number, + data, + }) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); + object_store + .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) + .await + .unwrap(); + let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness); + let url = object_store + .put(L1BatchNumber(job_id), &wrapper) + .await + .unwrap(); + + BlobUrls::Url(url) + } + + #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] + async fn update_database( + connection_pool: &ConnectionPool<Prover>, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_urls = match blob_urls { + BlobUrls::Scheduler(blobs) => blobs, + _ => unreachable!(), + }; + + let mut connection = connection_pool + .connection() + .await + .expect("failed to get database connection"); + let mut transaction = connection + .start_transaction() + .await + .expect("failed to get database transaction"); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + L1BatchNumber(job_id), + blob_urls.circuit_ids_and_urls, + AggregationRound::BasicCircuits, + 0, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .create_aggregation_jobs( + L1BatchNumber(job_id), + &blob_urls.closed_form_inputs_and_urls, + &blob_urls.scheduler_witness_url, + get_recursive_layer_circuit_id_for_base_layer, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .mark_witness_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + transaction + .commit() + .await + .expect("failed to commit database transaction"); + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs new file mode 100644 index 000000000000..08732689e3a6 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs @@ -0,0 +1,153 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context as _; +use tracing::Instrument; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper}; +use zksync_queued_job_processor::{async_trait, JobProcessor}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls}, + basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + metrics::WITNESS_GENERATOR_METRICS, +}; + +#[async_trait] +impl JobProcessor for BasicWitnessGenerator { + type Job = BasicWitnessGeneratorJob; + type JobId = L1BatchNumber; + // The artifact is optional to support skipping blocks when sampling is enabled. 
+ type JobArtifacts = Option<BasicCircuitArtifacts>; + + const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); + let pod_name = get_current_pod_name(); + match prover_connection + .fri_witness_generator_dal() + .get_next_basic_circuit_witness_job( + last_l1_batch_to_process, + self.protocol_version, + &pod_name, + ) + .await + { + Some(block_number) => { + tracing::info!( + "Processing FRI basic witness-gen for block {}", + block_number + ); + let started_at = Instant::now(); + let job = Self::get_artifacts(&block_number, &*self.object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + + Ok(Some((block_number, job))) + } + None => Ok(None), + } + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_witness_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: BasicWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle<anyhow::Result<Option<BasicCircuitArtifacts>>> { + let object_store = Arc::clone(&self.object_store); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + let block_number = job.block_number; + Ok( + Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await, + ) + }) + } + + #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + optional_artifacts: Option<BasicCircuitArtifacts>, + ) -> anyhow::Result<()> { + match optional_artifacts { + None => Ok(()), + Some(artifacts) => { + let blob_started_at = Instant::now(); + let circuit_urls = artifacts.circuit_urls.clone(); + let queue_urls = artifacts.queue_urls.clone(); + + let aux_output_witness_wrapper = + AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); + if self.config.shall_save_to_public_bucket { + self.public_blob_store.as_deref() + .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") + .put(job_id, &aux_output_witness_wrapper) + .await + .unwrap(); + } + + let scheduler_witness_url = + match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store) + .await + { + BlobUrls::Url(url) => url, + _ => unreachable!(), + }; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] + .observe(blob_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + BlobUrls::Scheduler(SchedulerBlobUrls { + circuit_ids_and_urls: circuit_urls, + closed_form_inputs_and_urls: queue_urls, + scheduler_witness_url, + }), + artifacts, + ) + .await?; + Ok(()) + } + } + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for BasicWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_basic_circuit_witness_job_attempts(*job_id) + .await + 
.map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for BasicWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits.rs b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs similarity index 63% rename from prover/crates/bin/witness_generator/src/basic_circuits.rs rename to prover/crates/bin/witness_generator/src/basic_circuits/mod.rs index 00a4d99ba9a9..c9755c333dad 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs @@ -1,49 +1,43 @@ use std::{ - collections::{hash_map::DefaultHasher, HashSet}, - hash::{Hash, Hasher}, + collections::HashSet, + hash::{DefaultHasher, Hash, Hasher}, sync::Arc, time::Instant, }; -use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, + zkevm_circuits::{ + fsm_input_output::ClosedFormInputCompactFormWitness, + scheduler::{ + block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, + }, + }, }; use tokio::sync::Semaphore; use tracing::Instrument; -use zkevm_test_harness::{ - geometry_config::get_geometry_config, witness::oracle::WitnessGenerationArtifact, -}; +use zkevm_test_harness::witness::oracle::WitnessGenerationArtifact; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_multivm::{ - interface::storage::StorageView, - vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, -}; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - circuit_definitions::{ + circuit_sequencer_api_latest::{ boojum::{ field::goldilocks::{GoldilocksExt2, GoldilocksField}, gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, }, - zkevm_circuits::scheduler::{ - block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, - }, + geometry_config::get_geometry_config, }, - get_current_pod_name, - keys::ClosedFormInputKey, - AuxOutputWitnessWrapper, CircuitAuxData, + interface::storage::StorageView, + vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, + zk_evm_latest::ethereum_types::Address, }; -use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use zksync_prover_interface::inputs::WitnessInputData; -use zksync_queued_job_processor::JobProcessor; +use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, Address, - L1BatchNumber, BOOTLOADER_ADDRESS, + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ @@ -52,33 +46,30 @@ use crate::{ storage_oracle::StorageOracle, utils::{ expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, - ClosedFormInputWrapper, SchedulerPartialInputWrapper, KZG_TRUSTED_SETUP_FILE, + ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, }, witness::WitnessStorage, }; +mod artifacts; +pub mod job_processor; + +#[derive(Clone)] pub struct BasicCircuitArtifacts { - circuit_urls: 
Vec<(u8, String)>, - queue_urls: Vec<(u8, String, usize)>, - scheduler_witness: SchedulerCircuitInstanceWitness< + pub(super) circuit_urls: Vec<(u8, String)>, + pub(super) queue_urls: Vec<(u8, String, usize)>, + pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< GoldilocksField, CircuitGoldilocksPoseidon2Sponge, GoldilocksExt2, >, - aux_output_witness: BlockAuxilaryOutputWitness<GoldilocksField>, -} - -#[derive(Debug)] -struct BlobUrls { - circuit_ids_and_urls: Vec<(u8, String)>, - closed_form_inputs_and_urls: Vec<(u8, String, usize)>, - scheduler_witness_url: String, + pub(super) aux_output_witness: BlockAuxilaryOutputWitness<GoldilocksField>, } #[derive(Clone)] pub struct BasicWitnessGeneratorJob { - block_number: L1BatchNumber, - job: WitnessInputData, + pub(super) block_number: L1BatchNumber, + pub(super) data: WitnessInputData, } #[derive(Debug)] @@ -90,6 +81,17 @@ pub struct BasicWitnessGenerator { protocol_version: ProtocolSemanticVersion, } +type Witness = ( + Vec<(u8, String)>, + Vec<(u8, String, usize)>, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + BlockAuxilaryOutputWitness<GoldilocksField>, +); + impl BasicWitnessGenerator { pub fn new( config: FriWitnessGeneratorConfig, @@ -113,7 +115,10 @@ impl BasicWitnessGenerator { started_at: Instant, max_circuits_in_flight: usize, ) -> Option<BasicCircuitArtifacts> { - let BasicWitnessGeneratorJob { block_number, job } = basic_job; + let BasicWitnessGeneratorJob { + block_number, + data: job, + } = basic_job; tracing::info!( "Starting witness generation of type {:?} for block {}", @@ -134,135 +139,8 @@ impl BasicWitnessGenerator { } } -#[async_trait] -impl JobProcessor for BasicWitnessGenerator { - type Job = BasicWitnessGeneratorJob; - type JobId = L1BatchNumber; - // The artifact is optional to support skipping blocks when sampling is enabled. 
- type JobArtifacts = Option<BasicCircuitArtifacts>; - - const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - let pod_name = get_current_pod_name(); - match prover_connection - .fri_witness_generator_dal() - .get_next_basic_circuit_witness_job( - last_l1_batch_to_process, - self.protocol_version, - &pod_name, - ) - .await - { - Some(block_number) => { - tracing::info!( - "Processing FRI basic witness-gen for block {}", - block_number - ); - let started_at = Instant::now(); - let job = get_artifacts(block_number, &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(Some((block_number, job))) - } - None => Ok(None), - } - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_witness_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: BasicWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle<anyhow::Result<Option<BasicCircuitArtifacts>>> { - let object_store = Arc::clone(&self.object_store); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, - ) - }) - } - - #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - optional_artifacts: Option<BasicCircuitArtifacts>, - ) -> anyhow::Result<()> { - match optional_artifacts { - None => Ok(()), - Some(artifacts) => { - let blob_started_at = Instant::now(); - let scheduler_witness_url = save_scheduler_artifacts( - job_id, - artifacts.scheduler_witness, - artifacts.aux_output_witness, - &*self.object_store, - self.public_blob_store.as_deref(), - self.config.shall_save_to_public_bucket, - ) - .await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] - .observe(blob_started_at.elapsed()); - - update_database( - &self.prover_connection_pool, - started_at, - job_id, - BlobUrls { - circuit_ids_and_urls: artifacts.circuit_urls, - closed_form_inputs_and_urls: artifacts.queue_urls, - scheduler_witness_url, - }, - ) - .await; - Ok(()) - } - } - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_basic_circuit_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for BasicWitnessGenerator") - } -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn process_basic_circuits_job( +pub(super) async fn process_basic_circuits_job( object_store: Arc<dyn ObjectStore>, started_at: Instant, block_number: L1BatchNumber, @@ -287,93 +165,6 @@ async fn process_basic_circuits_job( } } -#[tracing::instrument(skip_all, fields(l1_batch = 
%block_number))] -async fn update_database( - prover_connection_pool: &ConnectionPool<Prover>, - started_at: Instant, - block_number: L1BatchNumber, - blob_urls: BlobUrls, -) { - let mut connection = prover_connection_pool - .connection() - .await - .expect("failed to get database connection"); - let mut transaction = connection - .start_transaction() - .await - .expect("failed to get database transaction"); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::BasicCircuits, - 0, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .create_aggregation_jobs( - block_number, - &blob_urls.closed_form_inputs_and_urls, - &blob_urls.scheduler_witness_url, - get_recursive_layer_circuit_id_for_base_layer, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .mark_witness_job_as_successful(block_number, started_at.elapsed()) - .await; - transaction - .commit() - .await - .expect("failed to commit database transaction"); -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn get_artifacts( - block_number: L1BatchNumber, - object_store: &dyn ObjectStore, -) -> BasicWitnessGeneratorJob { - let job = object_store.get(block_number).await.unwrap(); - BasicWitnessGeneratorJob { block_number, job } -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn save_scheduler_artifacts( - block_number: L1BatchNumber, - scheduler_partial_input: SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - aux_output_witness: BlockAuxilaryOutputWitness<GoldilocksField>, - object_store: &dyn ObjectStore, - public_object_store: Option<&dyn ObjectStore>, - shall_save_to_public_bucket: bool, -) -> String { - let aux_output_witness_wrapper = AuxOutputWitnessWrapper(aux_output_witness); - if shall_save_to_public_bucket { - public_object_store - .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") - .put(block_number, &aux_output_witness_wrapper) - .await - .unwrap(); - } - object_store - .put(block_number, &aux_output_witness_wrapper) - .await - .unwrap(); - let wrapper = SchedulerPartialInputWrapper(scheduler_partial_input); - object_store.put(block_number, &wrapper).await.unwrap() -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] async fn save_recursion_queue( block_number: L1BatchNumber, @@ -396,17 +187,6 @@ async fn save_recursion_queue( (circuit_id, blob_url, basic_circuit_count) } -type Witness = ( - Vec<(u8, String)>, - Vec<(u8, String, usize)>, - SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - BlockAuxilaryOutputWitness<GoldilocksField>, -); - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] async fn generate_witness( block_number: L1BatchNumber, diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs new file mode 100644 index 000000000000..a94587d00ec6 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs @@ -0,0 +1,150 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, 
ProverDal}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; +use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata}; + +use crate::{ + artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, + metrics::WITNESS_GENERATOR_METRICS, + utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper}, +}; + +#[async_trait] +impl ArtifactsManager for LeafAggregationWitnessGenerator { + type InputMetadata = LeafAggregationJobMetadata; + type InputArtifacts = ClosedFormInputWrapper; + type OutputArtifacts = LeafAggregationArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Self::InputArtifacts> { + let key = ClosedFormInputKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + }; + + let artifacts = object_store + .get(key) + .await + .unwrap_or_else(|_| panic!("leaf aggregation job artifacts missing: {:?}", key)); + + Ok(artifacts) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) + )] + async fn save_artifacts( + _job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + 0, + artifacts.aggregations, + object_store, + ) + .await; + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + BlobUrls::Aggregation(AggregationBlobUrls { + aggregations_urls, + circuit_ids_and_urls: artifacts.circuit_ids_and_urls, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn update_database( + connection_pool: &ConnectionPool<Prover>, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + tracing::info!( + "Updating database for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + + let blob_urls = match blob_urls { + BlobUrls::Aggregation(blob_urls) => blob_urls, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await.unwrap(); + let mut transaction = prover_connection.start_transaction().await.unwrap(); + let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(artifacts.block_number) + .await; + tracing::info!( + "Inserting {} prover jobs for job_id {}, block {} with circuit id {}", + blob_urls.circuit_ids_and_urls.len(), + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + artifacts.block_number, + blob_urls.circuit_ids_and_urls, + AggregationRound::LeafAggregation, + 0, + protocol_version_id, + ) + .await; + tracing::info!( + "Updating node aggregation jobs url for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_witness_generator_dal() + .update_node_aggregation_jobs_url( + artifacts.block_number, + 
get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + number_of_dependent_jobs, + 0, + blob_urls.aggregations_urls, + ) + .await; + tracing::info!( + "Marking leaf aggregation job as successful for job id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction + .fri_witness_generator_dal() + .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) + .await; + + tracing::info!( + "Committing transaction for job_id {}, block {} with circuit id {}", + job_id, + artifacts.block_number.0, + artifacts.circuit_id, + ); + transaction.commit().await?; + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs new file mode 100644 index 000000000000..e032084151eb --- /dev/null +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs @@ -0,0 +1,124 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::{ + artifacts::ArtifactsManager, + leaf_aggregation::{ + prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator, + LeafAggregationWitnessGeneratorJob, + }, + metrics::WITNESS_GENERATOR_METRICS, +}; + +#[async_trait] +impl JobProcessor for LeafAggregationWitnessGenerator { + type Job = LeafAggregationWitnessGeneratorJob; + type JobId = u32; + type JobArtifacts = LeafAggregationArtifacts; + + const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(metadata) = prover_connection + .fri_witness_generator_dal() + .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + tracing::info!("Processing leaf aggregation job {:?}", metadata.id); + Ok(Some(( + metadata.id, + prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_leaf_aggregation_job()")?, + ))) + } + + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_leaf_aggregation_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: LeafAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle<anyhow::Result<LeafAggregationArtifacts>> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + }) + } + + async fn save_result( + &self, + job_id: u32, + started_at: Instant, + artifacts: LeafAggregationArtifacts, + ) -> anyhow::Result<()> { + let block_number = artifacts.block_number; + let circuit_id = artifacts.circuit_id; + tracing::info!( + "Saving leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); + + let blob_save_started_at = Instant::now(); + + let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), 
&*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] + .observe(blob_save_started_at.elapsed()); + + tracing::info!( + "Saved leaf aggregation artifacts for block {} with circuit {}", + block_number.0, + circuit_id, + ); + Self::update_database( + &self.prover_connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_leaf_aggregation_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for LeafAggregationWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs similarity index 52% rename from prover/crates/bin/witness_generator/src/leaf_aggregation.rs rename to prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs index 503c46e41bbd..d669a4cc97e3 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; use tokio::sync::Semaphore; use zkevm_test_harness::{ @@ -12,7 +11,7 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -22,40 +21,25 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - get_current_pod_name, - keys::ClosedFormInputKey, FriProofWrapper, }; -use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_prover_keystore::keystore::Keystore; -use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::LeafAggregationJobMetadata, L1BatchNumber, }; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, + load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, + ClosedFormInputWrapper, }, }; -pub struct LeafAggregationArtifacts { - circuit_id: u8, - block_number: L1BatchNumber, - pub aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>, - pub circuit_ids_and_urls: Vec<(u8, String)>, - #[allow(dead_code)] - closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, -} - -#[derive(Debug)] -struct BlobUrls { - circuit_ids_and_urls: Vec<(u8, String)>, - aggregations_urls: String, -} +mod artifacts; +mod job_processor; pub struct LeafAggregationWitnessGeneratorJob { pub(crate) circuit_id: u8, @@ -75,6 +59,16 @@ pub struct LeafAggregationWitnessGenerator { keystore: Keystore, } +#[derive(Clone)] +pub 
struct LeafAggregationArtifacts { + circuit_id: u8, + block_number: L1BatchNumber, + pub aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>, + pub circuit_ids_and_urls: Vec<(u8, String)>, + #[allow(dead_code)] + closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, +} + impl LeafAggregationWitnessGenerator { pub fn new( config: FriWitnessGeneratorConfig, @@ -113,108 +107,6 @@ impl LeafAggregationWitnessGenerator { } } -#[async_trait] -impl JobProcessor for LeafAggregationWitnessGenerator { - type Job = LeafAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = LeafAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing leaf aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_leaf_aggregation_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_leaf_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle<anyhow::Result<LeafAggregationArtifacts>> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: LeafAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - tracing::info!( - "Saving leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - let blob_urls = save_artifacts(artifacts, &*self.object_store).await; - tracing::info!( - "Saved leaf aggregation artifacts for block {} with circuit {} (count: {})", - block_number.0, - circuit_id, - blob_urls.circuit_ids_and_urls.len(), - ); - update_database( - &self.prover_connection_pool, - started_at, - block_number, - job_id, - blob_urls, - circuit_id, - ) - .await; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result<u32> { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_leaf_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for LeafAggregationWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) @@ -225,7 +117,8 @@ pub async fn prepare_leaf_aggregation_job( metadata: LeafAggregationJobMetadata, object_store: &dyn ObjectStore, keystore: Keystore, ) -> anyhow::Result<LeafAggregationWitnessGeneratorJob> { let started_at = Instant::now(); - let closed_form_input = get_artifacts(&metadata, 
object_store).await; + let closed_form_input = + LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] .observe(started_at.elapsed()); @@ -368,125 +261,3 @@ pub async fn process_leaf_aggregation_job( closed_form_inputs: job.closed_form_inputs.0, } } - -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -async fn update_database( - prover_connection_pool: &ConnectionPool<Prover>, - started_at: Instant, - block_number: L1BatchNumber, - job_id: u32, - blob_urls: BlobUrls, - circuit_id: u8, -) { - tracing::info!( - "Updating database for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - let mut prover_connection = prover_connection_pool.connection().await.unwrap(); - let mut transaction = prover_connection.start_transaction().await.unwrap(); - let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - tracing::info!( - "Inserting {} prover jobs for job_id {}, block {} with circuit id {}", - blob_urls.circuit_ids_and_urls.len(), - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::LeafAggregation, - 0, - protocol_version_id, - ) - .await; - tracing::info!( - "Updating node aggregation jobs url for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_witness_generator_dal() - .update_node_aggregation_jobs_url( - block_number, - get_recursive_layer_circuit_id_for_base_layer(circuit_id), - number_of_dependent_jobs, - 0, - blob_urls.aggregations_urls, - ) - .await; - tracing::info!( - "Marking leaf aggregation job as successful for job id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction - .fri_witness_generator_dal() - .mark_leaf_aggregation_as_successful(job_id, started_at.elapsed()) - .await; - - tracing::info!( - "Committing transaction for job_id {}, block {} with circuit id {}", - job_id, - block_number.0, - circuit_id, - ); - transaction.commit().await.unwrap(); -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -async fn get_artifacts( - metadata: &LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, -) -> ClosedFormInputWrapper { - let key = ClosedFormInputKey { - block_number: metadata.block_number, - circuit_id: metadata.circuit_id, - }; - object_store - .get(key) - .await - .unwrap_or_else(|_| panic!("leaf aggregation job artifacts missing: {:?}", key)) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) -)] -async fn save_artifacts( - artifacts: LeafAggregationArtifacts, - object_store: &dyn ObjectStore, -) -> BlobUrls { - let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), - 0, - artifacts.aggregations, - object_store, - ) - .await; - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - BlobUrls { - circuit_ids_and_urls: artifacts.circuit_ids_and_urls, - aggregations_urls, - } -} diff 
--git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs index 00d2ebf2bb3d..c0ac9718c6ee 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -1,6 +1,7 @@ #![allow(incomplete_features)] // We have to use generic const exprs. #![feature(generic_const_exprs)] +pub mod artifacts; pub mod basic_circuits; pub mod leaf_aggregation; pub mod metrics; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs new file mode 100644 index 000000000000..245027f0d677 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs @@ -0,0 +1,146 @@ +use std::time::Instant; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::keys::AggregationsKey; +use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata}; + +use crate::{ + artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + metrics::WITNESS_GENERATOR_METRICS, + node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, + utils::{save_node_aggregations_artifacts, AggregationWrapper}, +}; + +#[async_trait] +impl ArtifactsManager for NodeAggregationWitnessGenerator { + type InputMetadata = NodeAggregationJobMetadata; + type InputArtifacts = AggregationWrapper; + type OutputArtifacts = NodeAggregationArtifacts; + + #[tracing::instrument( + skip_all, + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) + )] + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Self::InputArtifacts> { + let key = AggregationsKey { + block_number: metadata.block_number, + circuit_id: metadata.circuit_id, + depth: metadata.depth, + }; + let artifacts = object_store.get(key).await.unwrap_or_else(|error| { + panic!( + "node aggregation job artifacts getting error. 
Key: {:?}, error: {:?}", + key, error + ) + }); + + Ok(artifacts) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) + )] + async fn save_artifacts( + _job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let started_at = Instant::now(); + let aggregations_urls = save_node_aggregations_artifacts( + artifacts.block_number, + artifacts.circuit_id, + artifacts.depth, + artifacts.next_aggregations, + object_store, + ) + .await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + + BlobUrls::Aggregation(AggregationBlobUrls { + aggregations_urls, + circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = % job_id) + )] + async fn update_database( + connection_pool: &ConnectionPool<Prover>, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let mut prover_connection = connection_pool.connection().await.unwrap(); + let blob_urls = match blob_urls { + BlobUrls::Aggregation(blobs) => blobs, + _ => unreachable!(), + }; + let mut transaction = prover_connection.start_transaction().await.unwrap(); + let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(artifacts.block_number) + .await; + match artifacts.next_aggregations.len() > 1 { + true => { + transaction + .fri_prover_jobs_dal() + .insert_prover_jobs( + artifacts.block_number, + blob_urls.circuit_ids_and_urls, + AggregationRound::NodeAggregation, + artifacts.depth, + protocol_version_id, + ) + .await; + transaction + .fri_witness_generator_dal() + .insert_node_aggregation_jobs( + artifacts.block_number, + artifacts.circuit_id, + Some(dependent_jobs as i32), + artifacts.depth, + &blob_urls.aggregations_urls, + protocol_version_id, + ) + .await; + } + false => { + let (_, blob_url) = blob_urls.circuit_ids_and_urls[0].clone(); + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + artifacts.block_number, + artifacts.circuit_id, + artifacts.depth, + 0, + AggregationRound::NodeAggregation, + &blob_url, + true, + protocol_version_id, + ) + .await + } + } + + transaction + .fri_witness_generator_dal() + .mark_node_aggregation_as_successful(job_id, started_at.elapsed()) + .await; + + transaction.commit().await?; + + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs new file mode 100644 index 000000000000..a015462cd6fe --- /dev/null +++ b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs @@ -0,0 +1,115 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + node_aggregation::{ + prepare_job, NodeAggregationArtifacts, NodeAggregationWitnessGenerator, + NodeAggregationWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type JobId = u32; + type JobArtifacts = 
NodeAggregationArtifacts; + + const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(metadata) = prover_connection + .fri_witness_generator_dal() + .get_next_node_aggregation_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + tracing::info!("Processing node aggregation job {:?}", metadata.id); + Ok(Some(( + metadata.id, + prepare_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_node_aggregation_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: NodeAggregationWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = % artifacts.block_number, circuit_id = % artifacts.circuit_id) + )] + async fn save_result( + &self, + job_id: u32, + started_at: Instant, + artifacts: NodeAggregationArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_node_aggregation_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for NodeAggregationWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation.rs b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs similarity index 52% rename from prover/crates/bin/witness_generator/src/node_aggregation.rs rename to prover/crates/bin/witness_generator/src/node_aggregation/mod.rs index 72bdebde572a..047caa363a89 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ @@ -9,7 +8,7 @@ use zkevm_test_harness::witness::recursive_aggregation::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, 
Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -19,25 +18,24 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - get_current_pod_name, - keys::AggregationsKey, FriProofWrapper, }; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; -use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, prover_dal::NodeAggregationJobMetadata, L1BatchNumber, }; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::{ - load_proofs_for_job_ids, save_node_aggregations_artifacts, - save_recursive_layer_prover_input_artifacts, AggregationWrapper, - }, + utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, }; +mod artifacts; +mod job_processor; + +#[derive(Clone)] pub struct NodeAggregationArtifacts { circuit_id: u8, block_number: L1BatchNumber, @@ -46,12 +44,6 @@ pub struct NodeAggregationArtifacts { pub recursive_circuit_ids_and_urls: Vec<(u8, String)>, } -#[derive(Debug)] -struct BlobUrls { - node_aggregations_url: String, - circuit_ids_and_urls: Vec<(u8, String)>, -} - #[derive(Clone)] pub struct NodeAggregationWitnessGeneratorJob { circuit_id: u8, @@ -92,7 +84,7 @@ impl NodeAggregationWitnessGenerator { #[tracing::instrument( skip_all, - fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id) )] pub async fn process_job_impl( job: NodeAggregationWitnessGeneratorJob, @@ -223,108 +215,9 @@ impl NodeAggregationWitnessGenerator { } } -#[async_trait] -impl JobProcessor for NodeAggregationWitnessGenerator { - type Job = NodeAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = NodeAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_node_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing node aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_node_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) - )] - async fn save_result( - &self, - job_id: u32, - 
started_at: Instant, - artifacts: NodeAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - let depth = artifacts.depth; - let shall_continue_node_aggregations = artifacts.next_aggregations.len() > 1; - let blob_urls = save_artifacts(artifacts, &*self.object_store).await; - update_database( - &self.prover_connection_pool, - started_at, - job_id, - block_number, - depth, - circuit_id, - blob_urls, - shall_continue_node_aggregations, - ) - .await; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_node_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for NodeAggregationWitnessGenerator") - } -} - #[tracing::instrument( skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) )] pub async fn prepare_job( metadata: NodeAggregationJobMetadata, @@ -332,7 +225,7 @@ pub async fn prepare_job( keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let artifacts = get_artifacts(&metadata, object_store).await; + let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); @@ -361,123 +254,3 @@ pub async fn prepare_job( all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?, }) } - -#[allow(clippy::too_many_arguments)] -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -async fn update_database( - prover_connection_pool: &ConnectionPool, - started_at: Instant, - id: u32, - block_number: L1BatchNumber, - depth: u16, - circuit_id: u8, - blob_urls: BlobUrls, - shall_continue_node_aggregations: bool, -) { - let mut prover_connection = prover_connection_pool.connection().await.unwrap(); - let mut transaction = prover_connection.start_transaction().await.unwrap(); - let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(block_number) - .await; - match shall_continue_node_aggregations { - true => { - transaction - .fri_prover_jobs_dal() - .insert_prover_jobs( - block_number, - blob_urls.circuit_ids_and_urls, - AggregationRound::NodeAggregation, - depth, - protocol_version_id, - ) - .await; - transaction - .fri_witness_generator_dal() - .insert_node_aggregation_jobs( - block_number, - circuit_id, - Some(dependent_jobs as i32), - depth, - &blob_urls.node_aggregations_url, - protocol_version_id, - ) - .await; - } - false => { - let (_, blob_url) = blob_urls.circuit_ids_and_urls[0].clone(); - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - block_number, - circuit_id, - depth, - 0, - AggregationRound::NodeAggregation, - &blob_url, - true, - protocol_version_id, - ) - .await - } - } - - transaction - .fri_witness_generator_dal() - .mark_node_aggregation_as_successful(id, started_at.elapsed()) - .await; - - transaction.commit().await.unwrap(); -} - 
-#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -async fn get_artifacts( - metadata: &NodeAggregationJobMetadata, - object_store: &dyn ObjectStore, -) -> AggregationWrapper { - let key = AggregationsKey { - block_number: metadata.block_number, - circuit_id: metadata.circuit_id, - depth: metadata.depth, - }; - object_store.get(key).await.unwrap_or_else(|error| { - panic!( - "node aggregation job artifacts getting error. Key: {:?}, error: {:?}", - key, error - ) - }) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) -)] -async fn save_artifacts( - artifacts: NodeAggregationArtifacts, - object_store: &dyn ObjectStore, -) -> BlobUrls { - let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - artifacts.circuit_id, - artifacts.depth, - artifacts.next_aggregations, - object_store, - ) - .await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); - - BlobUrls { - node_aggregations_url: aggregations_urls, - circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, - } -} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs new file mode 100644 index 000000000000..8379fcf9f933 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs @@ -0,0 +1,141 @@ +use std::{collections::HashMap, time::Instant}; + +use async_trait::async_trait; +use circuit_definitions::{ + circuit_definitions::recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, +}; +use zkevm_test_harness::empty_node_proof; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapper}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, +}; + +#[async_trait] +impl ArtifactsManager for RecursionTipWitnessGenerator { + type InputMetadata = Vec<(u8, u32)>; + type InputArtifacts = Vec<ZkSyncRecursionProof>; + type OutputArtifacts = RecursionTipArtifacts; + + /// Loads all proofs for a given recursion tip's job ids. + /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). + /// In this scenario, we still need to pass a proof, but it won't be taken into account during proving. + /// For this scenario, we use an empty_proof, but any proof would suffice. 
+ async fn get_artifacts( + metadata: &Vec<(u8, u32)>, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Vec<ZkSyncRecursionProof>> { + let job_mapping: HashMap<u8, u32> = metadata + .clone() + .into_iter() + .map(|(leaf_circuit_id, job_id)| { + ( + ZkSyncRecursionLayerStorageType::from_leaf_u8_to_basic_u8(leaf_circuit_id), + job_id, + ) + }) + .collect(); + + let empty_proof = empty_node_proof().into_inner(); + + let mut proofs = Vec::new(); + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + if job_mapping.contains_key(&circuit_id) { + let fri_proof_wrapper = object_store + .get(*job_mapping.get(&circuit_id).unwrap()) + .await + .unwrap_or_else(|_| { + panic!( + "Failed to load proof with circuit_id {} for recursion tip", + circuit_id + ) + }); + match fri_proof_wrapper { + FriProofWrapper::Base(_) => { + return Err(anyhow::anyhow!( + "Expected only recursive proofs for recursion tip, got Base for circuit {}", + circuit_id + )); + } + FriProofWrapper::Recursive(recursive_proof) => { + proofs.push(recursive_proof.into_inner()); + } + } + } else { + proofs.push(empty_proof.clone()); + } + } + Ok(proofs) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let key = FriCircuitKey { + block_number: L1BatchNumber(job_id), + circuit_id: 255, + sequence_number: 0, + depth: 0, + aggregation_round: AggregationRound::RecursionTip, + }; + + let blob_url = object_store + .put( + key, + &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()), + ) + .await + .unwrap(); + + BlobUrls::Url(blob_url) + } + + async fn update_database( + connection_pool: &ConnectionPool<Prover>, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_url = match blob_urls { + BlobUrls::Url(url) => url, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await?; + let mut transaction = prover_connection.start_transaction().await?; + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + L1BatchNumber(job_id), + ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, + 0, + 0, + AggregationRound::RecursionTip, + &blob_url, + false, + protocol_version_id, + ) + .await; + + transaction + .fri_witness_generator_dal() + .mark_recursion_tip_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + + transaction.commit().await?; + + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs new file mode 100644 index 000000000000..f114724cfec4 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs @@ -0,0 +1,130 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + recursion_tip::{ + prepare_job, RecursionTipArtifacts, RecursionTipWitnessGenerator, + RecursionTipWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for RecursionTipWitnessGenerator { + type Job = 
RecursionTipWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = RecursionTipArtifacts; + + const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection + .fri_witness_generator_dal() + .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + + let final_node_proof_job_ids = prover_connection + .fri_prover_jobs_dal() + .get_final_node_proof_job_ids_for(l1_batch_number) + .await; + + assert_eq!( + final_node_proof_job_ids.len(), + number_of_final_node_jobs as usize, + "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", + number_of_final_node_jobs, final_node_proof_job_ids.len() + ); + + Ok(Some(( + l1_batch_number, + prepare_job( + l1_batch_number, + final_node_proof_job_ids, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_recursion_tip_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: RecursionTipWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: RecursionTipArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = + Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + blob_urls, + artifacts, + ) + .await?; + + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_recursion_tip_witness_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for RecursionTipWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip.rs b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs similarity index 58% rename from prover/crates/bin/witness_generator/src/recursion_tip.rs rename to prover/crates/bin/witness_generator/src/recursion_tip/mod.rs index 5e97631babb9..4abb56a7d788 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs @@ -1,7 +1,6 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context; -use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::recursion_layer::{ 
recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, @@ -37,23 +36,20 @@ use zkevm_test_harness::{ }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::{ - get_current_pod_name, - keys::{ClosedFormInputKey, FriCircuitKey}, - CircuitWrapper, -}; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::keys::ClosedFormInputKey; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; -use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ - metrics::WITNESS_GENERATOR_METRICS, - utils::{load_proofs_for_recursion_tip, ClosedFormInputWrapper}, + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, }; +mod artifacts; +mod job_processor; + #[derive(Clone)] pub struct RecursionTipWitnessGeneratorJob { block_number: L1BatchNumber, @@ -65,6 +61,7 @@ pub struct RecursionTipWitnessGeneratorJob { node_vk: ZkSyncRecursionLayerVerificationKey, } +#[derive(Clone)] pub struct RecursionTipArtifacts { pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, } @@ -138,148 +135,6 @@ impl RecursionTipWitnessGenerator { } } -#[async_trait] -impl JobProcessor for RecursionTipWitnessGenerator { - type Job = RecursionTipWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = RecursionTipArtifacts; - - const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection - .fri_witness_generator_dal() - .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - - let final_node_proof_job_ids = prover_connection - .fri_prover_jobs_dal() - .get_final_node_proof_job_ids_for(l1_batch_number) - .await; - - assert_eq!( - final_node_proof_job_ids.len(), - number_of_final_node_jobs as usize, - "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", - number_of_final_node_jobs, final_node_proof_job_ids.len() - ); - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - final_node_proof_job_ids, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_recursion_tip_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: RecursionTipWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: RecursionTipArtifacts, - ) -> anyhow::Result<()> { - let key = FriCircuitKey { - block_number: job_id, - circuit_id: 255, - sequence_number: 0, - depth: 0, - aggregation_round: AggregationRound::RecursionTip, 
- }; - let blob_save_started_at = Instant::now(); - - let recursion_tip_circuit_blob_url = self - .object_store - .put( - key, - &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit), - ) - .await?; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] - .observe(blob_save_started_at.elapsed()); - - let mut prover_connection = self.prover_connection_pool.connection().await?; - let mut transaction = prover_connection.start_transaction().await?; - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(job_id) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - job_id, - ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, - 0, - 0, - AggregationRound::RecursionTip, - &recursion_tip_circuit_blob_url, - false, - protocol_version_id, - ) - .await; - - transaction - .fri_witness_generator_dal() - .mark_recursion_tip_job_as_successful(job_id, started_at.elapsed()) - .await; - - transaction.commit().await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result<u32> { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_recursion_tip_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for RecursionTipWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %l1_batch_number) @@ -292,7 +147,8 @@ pub async fn prepare_job( ) -> anyhow::Result<RecursionTipWitnessGeneratorJob> { let started_at = Instant::now(); let recursion_tip_proofs = - load_proofs_for_recursion_tip(final_node_proof_job_ids, object_store).await?; + RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) + .await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] .observe(started_at.elapsed()); diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs new file mode 100644 index 000000000000..b20a97641887 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs @@ -0,0 +1,94 @@ +use std::time::Instant; + +use async_trait::async_trait; +use circuit_definitions::circuit_definitions::recursion_layer::ZkSyncRecursionLayerStorageType; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapper}; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::{ArtifactsManager, BlobUrls}, + scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, +}; + +#[async_trait] +impl ArtifactsManager for SchedulerWitnessGenerator { + type InputMetadata = u32; + type InputArtifacts = FriProofWrapper; + type OutputArtifacts = SchedulerArtifacts; + + async fn get_artifacts( + metadata: &Self::InputMetadata, + object_store: &dyn ObjectStore, + ) -> anyhow::Result<Self::InputArtifacts> { + let artifacts = object_store.get(*metadata).await?; + + Ok(artifacts) + } + + async fn save_artifacts( + job_id: u32, + artifacts: Self::OutputArtifacts, + object_store: &dyn ObjectStore, + ) -> BlobUrls { + let key = FriCircuitKey { + block_number: L1BatchNumber(job_id), + circuit_id: 1, + sequence_number: 0, + depth: 0, + 
aggregation_round: AggregationRound::Scheduler, + }; + + let blob_url = object_store + .put( + key, + &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), + ) + .await + .unwrap(); + + BlobUrls::Url(blob_url) + } + + async fn update_database( + connection_pool: &ConnectionPool, + job_id: u32, + started_at: Instant, + blob_urls: BlobUrls, + _artifacts: Self::OutputArtifacts, + ) -> anyhow::Result<()> { + let blob_url = match blob_urls { + BlobUrls::Url(url) => url, + _ => panic!("Unexpected blob urls type"), + }; + + let mut prover_connection = connection_pool.connection().await?; + let mut transaction = prover_connection.start_transaction().await?; + let protocol_version_id = transaction + .fri_witness_generator_dal() + .protocol_version_for_l1_batch(L1BatchNumber(job_id)) + .await; + transaction + .fri_prover_jobs_dal() + .insert_prover_job( + L1BatchNumber(job_id), + ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, + 0, + 0, + AggregationRound::Scheduler, + &blob_url, + false, + protocol_version_id, + ) + .await; + + transaction + .fri_witness_generator_dal() + .mark_scheduler_job_as_successful(L1BatchNumber(job_id), started_at.elapsed()) + .await; + + transaction.commit().await?; + Ok(()) + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs new file mode 100644 index 000000000000..fe4f2db4090a --- /dev/null +++ b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs @@ -0,0 +1,129 @@ +use std::time::Instant; + +use anyhow::Context as _; +use async_trait::async_trait; +use zksync_prover_dal::ProverDal; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + scheduler::{ + prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + }, +}; + +#[async_trait] +impl JobProcessor for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type JobId = L1BatchNumber; + type JobArtifacts = SchedulerArtifacts; + + const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; + + async fn get_next_job(&self) -> anyhow::Result> { + let mut prover_connection = self.prover_connection_pool.connection().await?; + let pod_name = get_current_pod_name(); + let Some(l1_batch_number) = prover_connection + .fri_witness_generator_dal() + .get_next_scheduler_witness_job(self.protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + let recursion_tip_job_id = prover_connection + .fri_prover_jobs_dal() + .get_recursion_tip_proof_job_id(l1_batch_number) + .await + .context(format!( + "could not find recursion tip proof for l1 batch {}", + l1_batch_number + ))?; + + Ok(Some(( + l1_batch_number, + prepare_job( + l1_batch_number, + recursion_tip_job_id, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, + ))) + } + + async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { + self.prover_connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_scheduler_job_failed(&error, job_id) + .await; + } + + #[allow(clippy::async_yields_async)] + async fn process_job( + &self, + _job_id: &Self::JobId, + job: SchedulerWitnessGeneratorJob, + started_at: Instant, + ) -> tokio::task::JoinHandle> { + 
tokio::task::spawn_blocking(move || { + let block_number = job.block_number; + let _span = tracing::info_span!("scheduler", %block_number).entered(); + Ok(Self::process_job_sync(job, started_at)) + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job_id) + )] + async fn save_result( + &self, + job_id: L1BatchNumber, + started_at: Instant, + artifacts: SchedulerArtifacts, + ) -> anyhow::Result<()> { + let blob_save_started_at = Instant::now(); + + let blob_urls = + Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] + .observe(blob_save_started_at.elapsed()); + + Self::update_database( + &self.prover_connection_pool, + job_id.0, + started_at, + blob_urls, + artifacts, + ) + .await?; + + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { + let mut prover_storage = self + .prover_connection_pool + .connection() + .await + .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; + prover_storage + .fri_witness_generator_dal() + .get_scheduler_witness_job_attempts(*job_id) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context("failed to get job attempts for SchedulerWitnessGenerator") + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs similarity index 54% rename from prover/crates/bin/witness_generator/src/scheduler.rs rename to prover/crates/bin/witness_generator/src/scheduler/mod.rs index c6e43582bbdb..10230b35c4f6 100644 --- a/prover/crates/bin/witness_generator/src/scheduler.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs @@ -1,13 +1,12 @@ use std::{convert::TryInto, sync::Arc, time::Instant}; use anyhow::Context as _; -use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::{ @@ -21,18 +20,22 @@ use zksync_prover_fri_types::{ recursion_layer_proof_config, zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, }, - get_current_pod_name, - keys::FriCircuitKey, - CircuitWrapper, FriProofWrapper, + FriProofWrapper, }; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; -use zksync_queued_job_processor::JobProcessor; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; -use crate::{metrics::WITNESS_GENERATOR_METRICS, utils::SchedulerPartialInputWrapper}; +use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, + utils::SchedulerPartialInputWrapper, +}; + +mod artifacts; +mod job_processor; +#[derive(Clone)] pub struct SchedulerArtifacts { pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, } @@ -121,143 +124,6 @@ impl SchedulerWitnessGenerator { } } -#[async_trait] -impl JobProcessor for SchedulerWitnessGenerator { - type Job = SchedulerWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = SchedulerArtifacts; - - const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; - - async fn get_next_job(&self) -> 
anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(l1_batch_number) = prover_connection - .fri_witness_generator_dal() - .get_next_scheduler_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - let recursion_tip_job_id = prover_connection - .fri_prover_jobs_dal() - .get_recursion_tip_proof_job_id(l1_batch_number) - .await - .context(format!( - "could not find recursion tip proof for l1 batch {}", - l1_batch_number - ))?; - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_scheduler_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: SchedulerWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: SchedulerArtifacts, - ) -> anyhow::Result<()> { - let key = FriCircuitKey { - block_number: job_id, - circuit_id: 1, - sequence_number: 0, - depth: 0, - aggregation_round: AggregationRound::Scheduler, - }; - let blob_save_started_at = Instant::now(); - let scheduler_circuit_blob_url = self - .object_store - .put(key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit)) - .await?; - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] - .observe(blob_save_started_at.elapsed()); - - let mut prover_connection = self.prover_connection_pool.connection().await?; - let mut transaction = prover_connection.start_transaction().await?; - let protocol_version_id = transaction - .fri_witness_generator_dal() - .protocol_version_for_l1_batch(job_id) - .await; - transaction - .fri_prover_jobs_dal() - .insert_prover_job( - job_id, - ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8, - 0, - 0, - AggregationRound::Scheduler, - &scheduler_circuit_blob_url, - false, - protocol_version_id, - ) - .await; - - transaction - .fri_witness_generator_dal() - .mark_scheduler_job_as_successful(job_id, started_at.elapsed()) - .await; - - transaction.commit().await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_scheduler_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for SchedulerWitnessGenerator") - } -} - #[tracing::instrument( skip_all, fields(l1_batch = %l1_batch_number) @@ -269,7 +135,8 @@ pub async fn prepare_job( keystore: Keystore, ) -> anyhow::Result { let started_at = Instant::now(); - let wrapper = 
object_store.get(recursion_tip_job_id).await?; + let wrapper = + SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?; let recursion_tip_proof = match wrapper { FriProofWrapper::Base(_) => Err(anyhow::anyhow!( "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base" diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index f8656ac90f44..3ea2b539773f 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -1,21 +1,14 @@ use std::{ - collections::HashMap, io::{BufWriter, Write as _}, sync::Arc, }; use circuit_definitions::{ - circuit_definitions::{ - base_layer::ZkSyncBaseLayerCircuit, - recursion_layer::{ZkSyncRecursionLayerStorageType, ZkSyncRecursionProof}, - }, + circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, encodings::memory_query::MemoryQueueStateWitnesses, }; use once_cell::sync::Lazy; -use zkevm_test_harness::{ - boojum::field::goldilocks::GoldilocksField, empty_node_proof, - zkevm_circuits::scheduler::aux::BaseLayerCircuitType, -}; +use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField; use zksync_multivm::utils::get_used_bootloader_memory_bytes; use zksync_object_store::{serialize_using_bincode, Bucket, ObjectStore, StoredObject}; use zksync_prover_fri_types::{ @@ -248,54 +241,3 @@ pub async fn load_proofs_for_job_ids( .map(|x| x.unwrap()) .collect() } - -/// Loads all proofs for a given recursion tip's job ids. -/// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). -/// In this scenario, we still need to pass a proof, but it won't be taken into account during proving. -/// For this scenario, we use an empty_proof, but any proof would suffice. -#[tracing::instrument(skip_all)] -pub async fn load_proofs_for_recursion_tip( - job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, -) -> anyhow::Result> { - let job_mapping: HashMap = job_ids - .into_iter() - .map(|(leaf_circuit_id, job_id)| { - ( - ZkSyncRecursionLayerStorageType::from_leaf_u8_to_basic_u8(leaf_circuit_id), - job_id, - ) - }) - .collect(); - - let empty_proof = empty_node_proof().into_inner(); - - let mut proofs = Vec::new(); - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - if job_mapping.contains_key(&circuit_id) { - let fri_proof_wrapper = object_store - .get(*job_mapping.get(&circuit_id).unwrap()) - .await - .unwrap_or_else(|_| { - panic!( - "Failed to load proof with circuit_id {} for recursion tip", - circuit_id - ) - }); - match fri_proof_wrapper { - FriProofWrapper::Base(_) => { - return Err(anyhow::anyhow!( - "Expected only recursive proofs for recursion tip, got Base for circuit {}", - circuit_id - )); - } - FriProofWrapper::Recursive(recursive_proof) => { - proofs.push(recursive_proof.into_inner()); - } - } - } else { - proofs.push(empty_proof.clone()); - } - } - Ok(proofs) -} From ccf1b6352f6db56bcb4b67d53564a3919532efeb Mon Sep 17 00:00:00 2001 From: Danil Date: Thu, 12 Sep 2024 15:47:47 +0200 Subject: [PATCH 066/116] fix(zk-toolbox): use chain admin for bridgehub (#2857) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Signed-off-by: Danil --- contracts | 2 +- zk_toolbox/crates/config/src/contracts.rs | 1 + .../deploy_ecosystem/output.rs | 1 + .../src/commands/ecosystem/init.rs | 37 ++++++++++++++++++- .../zk_supervisor/src/commands/test/rust.rs | 4 +- 5 files changed, 41 insertions(+), 4 deletions(-) diff --git a/contracts b/contracts index 73b20c4b972f..3a1b5d4b94ff 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 73b20c4b972f575613b4054d238332f93f2685cc +Subproject commit 3a1b5d4b94ffb00f03d436a7db7e48589eb74d39 diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs index 19d432909487..0d4b1c7b1f81 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zk_toolbox/crates/config/src/contracts.rs @@ -69,6 +69,7 @@ impl ContractsConfig { self.ecosystem_contracts .diamond_cut_data .clone_from(&deploy_l1_output.contracts_config.diamond_cut_data); + self.l1.chain_admin_addr = deploy_l1_output.deployed_addresses.chain_admin; } pub fn set_chain_contracts(&mut self, register_chain_output: &RegisterChainOutput) { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs index bf9292e9ba30..7f35cf0357c2 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -44,6 +44,7 @@ pub struct DeployL1DeployedAddressesOutput { pub governance_addr: Address, pub transparent_proxy_admin_addr: Address, pub validator_timelock_addr: Address, + pub chain_admin: Address, pub bridgehub: L1BridgehubOutput, pub bridges: L1BridgesOutput, pub state_transition: L1StateTransitionOutput, diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 0862d1018d89..7d34437ef2d2 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -36,7 +36,7 @@ use super::{ setup_observability, }; use crate::{ - accept_ownership::accept_owner, + accept_ownership::{accept_admin, accept_owner}, commands::{ chain::{self, args::init::PortOffset}, ecosystem::create_configs::{ @@ -332,6 +332,17 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config.ecosystem_contracts.bridgehub_proxy_addr, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + accept_owner( shell, config, @@ -343,6 +354,17 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config.bridges.shared.l1_address, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + accept_owner( shell, config, @@ -356,6 +378,19 @@ async fn deploy_ecosystem_inner( ) .await?; + accept_admin( + shell, + config, + contracts_config.l1.chain_admin_addr, + config.get_wallets()?.governor_private_key(), + contracts_config + .ecosystem_contracts + .state_transition_proxy_addr, + &forge_args, + l1_rpc_url.clone(), + ) + .await?; + Ok(contracts_config) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index c42f95e8e3b5..3ac331becc9f 100644 --- 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -12,8 +12,8 @@ use crate::{ dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ - MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, - MSG_RESETTING_TEST_DATABASES, MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, + MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, + MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, }, }; From fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7 Mon Sep 17 00:00:00 2001 From: Joonatan Saarhelo Date: Thu, 12 Sep 2024 15:00:59 +0100 Subject: [PATCH 067/116] fix: count SECP256 precompile to account validation gas limit as well (#2859) Account validation counts all the precompile's extra gas costs. This PR adds a missing precompile. --- core/lib/constants/src/contracts.rs | 5 +++++ core/lib/multivm/src/versions/vm_latest/tracers/utils.rs | 4 +++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs index 44bb05a89764..73b4a0ffaaa2 100644 --- a/core/lib/constants/src/contracts.rs +++ b/core/lib/constants/src/contracts.rs @@ -100,6 +100,11 @@ pub const SHA256_PRECOMPILE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x02, ]); +pub const SECP256R1_VERIFY_PRECOMPILE_ADDRESS: Address = H160([ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x01, 0x00, +]); + pub const EC_ADD_PRECOMPILE_ADDRESS: Address = H160([ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs index 1ecb75c28071..0a11f5d3f849 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/utils.rs @@ -6,7 +6,8 @@ use zk_evm_1_5_0::{ }, }; use zksync_system_constants::{ - ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, + ECRECOVER_PRECOMPILE_ADDRESS, KECCAK256_PRECOMPILE_ADDRESS, + SECP256R1_VERIFY_PRECOMPILE_ADDRESS, SHA256_PRECOMPILE_ADDRESS, }; use zksync_types::U256; use zksync_utils::u256_to_h256; @@ -187,6 +188,7 @@ pub(crate) fn computational_gas_price( if address == KECCAK256_PRECOMPILE_ADDRESS || address == SHA256_PRECOMPILE_ADDRESS || address == ECRECOVER_PRECOMPILE_ADDRESS + || address == SECP256R1_VERIFY_PRECOMPILE_ADDRESS { data.src1_value.value.low_u32() } else { From 3609ea6f1b6d767c0f1d64a9303f0331db920931 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 12 Sep 2024 18:43:53 +0300 Subject: [PATCH 068/116] chore(vm): Bump `zksync_vm2` revision (#2838) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Updates the fast VM revision to incorporate latest changes. ## Why ❔ Mainly to check that these changes work. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
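For readers following the hunks below: the dependency is renamed from `vm2` to `zksync_vm2` (pinned to rev `cd6136c42ec56856e0abcf2a98d1a9e120161482`), imports change accordingly, the free-standing `vm2::FIRST_HEAP` constant becomes the `HeapId::FIRST` associated constant, and memory checks read from `vm.vm.inner` instead of `vm.vm.inner.state`. A minimal before/after sketch of a call site, inferred from the diff (the helper name `first_heap_probe` is illustrative and not part of this patch):

```rust
// Before this bump, the crate was imported as `vm2`:
//     let probe = (cell, vm2::FIRST_HEAP, 0);
// After the bump, the crate is `zksync_vm2` and heap ids are typed:
use zksync_types::U256;
use zksync_vm2::HeapId;

/// Illustrative helper: builds the (value, heap, offset) triple that the
/// bootloader test passes to `verify_required_memory` for the first heap slot.
fn first_heap_probe(cell: U256) -> (U256, HeapId, u32) {
    (cell, HeapId::FIRST, 0)
}
```
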
--- Cargo.lock | 42 ++++----- Cargo.toml | 2 +- core/lib/multivm/Cargo.toml | 2 +- .../src/versions/vm_fast/circuits_tracer.rs | 2 +- .../multivm/src/versions/vm_fast/events.rs | 2 +- core/lib/multivm/src/versions/vm_fast/glue.rs | 6 +- .../src/versions/vm_fast/tests/bootloader.rs | 6 +- .../src/versions/vm_fast/tests/code_oracle.rs | 2 +- .../src/versions/vm_fast/tests/default_aa.rs | 4 +- .../src/versions/vm_fast/tests/gas_limit.rs | 2 +- .../versions/vm_fast/tests/l1_tx_execution.rs | 2 +- .../versions/vm_fast/tests/require_eip712.rs | 2 +- .../src/versions/vm_fast/tests/rollbacks.rs | 2 +- .../tests/tester/transaction_test_info.rs | 18 ++-- .../vm_fast/tests/tester/vm_tester.rs | 6 +- .../src/versions/vm_fast/tests/transfer.rs | 2 +- .../src/versions/vm_fast/tests/upgrade.rs | 4 +- .../src/versions/vm_fast/tests/utils.rs | 8 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 90 +++++++++++-------- prover/Cargo.lock | 42 ++++----- .../src/gpu_prover_job_processor.rs | 5 +- 21 files changed, 133 insertions(+), 118 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d7b19b424bc..59b464f8501d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2177,14 +2177,6 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" -[[package]] -name = "eravm-stable-interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" -dependencies = [ - "primitive-types", -] - [[package]] name = "errno" version = "0.3.9" @@ -8356,18 +8348,6 @@ dependencies = [ "zksync_vlog", ] -[[package]] -name = "vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" -dependencies = [ - "enum_dispatch", - "eravm-stable-interface", - "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", -] - [[package]] name = "walkdir" version = "2.4.0" @@ -10156,7 +10136,6 @@ dependencies = [ "tokio", "tracing", "vise", - "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", @@ -10168,6 +10147,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm2", "zksync_vm_interface", ] @@ -10966,6 +10946,26 @@ dependencies = [ "vise-exporter", ] +[[package]] +name = "zksync_vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", + "zksync_vm2_interface", +] + +[[package]] +name = "zksync_vm2_interface" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "primitive-types", +] + [[package]] name = "zksync_vm_executor" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index 6ee04692d8c7..5eb862f0bcb7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -226,7 +226,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "4ef15d46410ffc11744771a3a6c7c09dd9470c90" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = 
"cd6136c42ec56856e0abcf2a98d1a9e120161482" } # Consensus dependencies. zksync_concurrency = "=0.1.1" diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 4711eefa0d6c..5e76c10f53e7 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -16,7 +16,7 @@ zk_evm_1_4_1.workspace = true zk_evm_1_4_0.workspace = true zk_evm_1_3_3.workspace = true zk_evm_1_3_1.workspace = true -vm2.workspace = true +zksync_vm2.workspace = true circuit_sequencer_api_1_3_3.workspace = true circuit_sequencer_api_1_4_0.workspace = true diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index 061d91be60b7..de6ead71e655 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 798a1e12bdd8..2312c3d97b40 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,6 +1,6 @@ -use vm2::Event; use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; +use zksync_vm2::Event; use crate::interface::VmEvent; diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index cbf22f9122b0..f24c82af11e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -3,9 +3,9 @@ use zksync_utils::u256_to_h256; use crate::glue::GlueFrom; -impl GlueFrom<&vm2::L2ToL1Log> for SystemL2ToL1Log { - fn glue_from(value: &vm2::L2ToL1Log) -> Self { - let vm2::L2ToL1Log { +impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log { + fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self { + let zksync_vm2::L2ToL1Log { key, value, is_service, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 8e1a273bc7b1..5c1158a5909d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,5 +1,6 @@ use assert_matches::assert_matches; use zksync_types::U256; +use zksync_vm2::HeapId; use crate::{ interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, @@ -25,10 +26,7 @@ fn test_dummy_bootloader() { let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.inner.state, - vec![(correct_first_cell, vm2::FIRST_HEAP, 0)], - ); + verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); } #[test] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 836603d77d87..caea07617ddb 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -210,7 +210,7 @@ fn refunds_in_code_oracle() { let account = &mut vm.rich_accounts[0]; if decommit { - let (_, is_fresh) = vm.vm.inner.world_diff.decommit_opcode( + let 
(_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( &mut vm.vm.world, &mut CircuitsTracer::default(), h256_to_u256(normal_zkevm_bytecode_hash), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index f809af81b165..c2ce02d39fe1 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -61,7 +61,7 @@ fn test_default_aa_interaction() { verify_required_storage( &expected_slots, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); let expected_fee = maximal_fee @@ -71,7 +71,7 @@ fn test_default_aa_interaction() { AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), &vm.fee_account, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); assert_eq!( diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index e0c55c5a685a..b7a2154bdc71 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -27,7 +27,7 @@ fn test_tx_gas_limit_offset() { vm.vm.push_transaction(tx); - assert!(vm.vm.inner.state.previous_frames.is_empty()); + assert!(!vm.vm.has_previous_far_calls()); let gas_limit_from_memory = vm .vm .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index f1399a1b4e68..3b58565098d5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -82,7 +82,7 @@ fn test_l1_tx_execution() { ] { assert_eq!( expected_value, - vm.vm.inner.world_diff.get_storage_state()[&( + vm.vm.inner.world_diff().get_storage_state()[&( *storage_location.address(), h256_to_u256(*storage_location.key()) )] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index fe94189ed7cf..68e49b202a93 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -26,7 +26,7 @@ impl VmTester { ); self.vm .inner - .world_diff + .world_diff() .get_storage_state() .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) .copied() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index c530c5af18ea..a677a61c6029 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -56,7 +56,7 @@ fn test_vm_rollbacks() { TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), ]); - assert_eq!(result_without_rollbacks, result_with_rollbacks); + pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); } #[test] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index 105bc5f2fd43..ce45390260c5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,3 +1,5 @@ +use 
std::fmt; + use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; use super::VmTester; @@ -7,7 +9,7 @@ use crate::{ VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, VmRevertReason, }, - vm_fast::{circuits_tracer::CircuitsTracer, vm::World, Vm}, + vm_fast::Vm, }; #[derive(Debug, Clone)] @@ -186,12 +188,12 @@ impl TransactionTestInfo { // TODO this doesn't include all the state of ModifiedWorld #[derive(Debug)] struct VmStateDump { - state: vm2::State>, + state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[vm2::Event]>, + events: Box<[zksync_vm2::Event]>, } -impl PartialEq for VmStateDump { +impl PartialEq for VmStateDump { fn eq(&self, other: &Self) -> bool { self.state == other.state && self.storage_writes == other.storage_writes @@ -200,17 +202,17 @@ impl PartialEq for VmStateDump { } impl Vm { - fn dump_state(&self) -> VmStateDump { + fn dump_state(&self) -> VmStateDump { VmStateDump { - state: self.inner.state.clone(), + state: self.inner.dump_state(), storage_writes: self .inner - .world_diff + .world_diff() .get_storage_state() .iter() .map(|(k, v)| (*k, *v)) .collect(), - events: self.inner.world_diff.events().into(), + events: self.inner.world_diff().events().into(), } } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs index 335ec752c7d4..8071bcf51d4a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -1,6 +1,5 @@ use std::{cell::RefCell, rc::Rc}; -use vm2::WorldDiff; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; use zksync_types::{ @@ -13,6 +12,7 @@ use zksync_types::{ StorageKey, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; +use zksync_vm2::WorldDiff; use crate::{ interface::{ @@ -53,7 +53,7 @@ impl VmTester { pub(crate) fn reset_with_empty_storage(&mut self) { self.storage = Rc::new(RefCell::new(get_empty_storage())); - self.vm.inner.world_diff = WorldDiff::default(); + *self.vm.inner.world_diff_mut() = WorldDiff::default(); self.reset_state(false); } @@ -78,7 +78,7 @@ impl VmTester { { let mut storage = storage.borrow_mut(); // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff.get_storage_state() { + for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); storage.set_value(key, u256_to_h256(value)); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 3327012801ce..57877854031d 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -92,7 +92,7 @@ fn test_send_or_transfer(test_option: TestOptions) { AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), &recipient_address, &mut vm.vm.world.storage, - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); assert_eq!(new_recipient_balance, value); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index f972b29cda8a..dd25c2097405 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ 
b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -164,7 +164,7 @@ fn test_force_deploy_upgrade() { verify_required_storage( &expected_slots, &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); } @@ -223,7 +223,7 @@ fn test_complex_upgrader() { verify_required_storage( &expected_slots, &mut *vm.storage.borrow_mut(), - vm.vm.inner.world_diff.get_storage_state(), + vm.vm.inner.world_diff().get_storage_state(), ); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index d75ae12c30c1..d91e13076514 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -2,7 +2,6 @@ use std::collections::BTreeMap; use ethabi::Contract; use once_cell::sync::Lazy; -use vm2::{instruction_handlers::HeapInterface, HeapId, State}; use zksync_contracts::{ load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, }; @@ -11,18 +10,19 @@ use zksync_types::{ U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; +use zksync_vm2::{HeapId, StateInterface}; use crate::interface::storage::ReadStorage; pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = Lazy::new(BaseSystemContracts::load_from_disk); -pub(crate) fn verify_required_memory( - state: &State, +pub(crate) fn verify_required_memory( + state: &impl StateInterface, required_values: Vec<(U256, HeapId, u32)>, ) { for (required_value, memory_page, cell) in required_values { - let current_value = state.heaps[memory_page].read_u256(cell * 32); + let current_value = state.read_heap_u256(memory_page, cell * 32); assert_eq!(current_value, required_value); } } diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index d8816cfaf2a6..5a73ce49b06c 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,9 +1,5 @@ use std::{collections::HashMap, fmt}; -use vm2::{ - decode::decode_program, fat_pointer::FatPointer, instruction_handlers::HeapInterface, - ExecutionEnd, Program, Settings, Tracer, VirtualMachine, -}; use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; use zksync_contracts::SystemContractCode; use zksync_types::{ @@ -19,6 +15,10 @@ use zksync_types::{ L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; +use zksync_vm2::{ + decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, + Settings, StateInterface, Tracer, VirtualMachine, +}; use super::{ bootloader_state::{BootloaderState, BootloaderStateSnapshot}, @@ -79,7 +79,7 @@ impl Vm { operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut pubdata_before = self.inner.world_diff.pubdata() as u32; + let mut pubdata_before = self.inner.world_diff().pubdata() as u32; let result = loop { let hook = match self.inner.run(&mut self.world, tracer) { @@ -93,7 +93,7 @@ impl Vm { } ExecutionEnd::Panicked => { break ExecutionResult::Halt { - reason: if self.inner.state.current_frame.gas == 0 { + reason: if self.gas_remaining() == 0 { Halt::BootloaderOutOfGas } else { Halt::VMPanic @@ -125,7 +125,7 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff.pubdata() as u32; + let pubdata_published = self.inner.world_diff().pubdata() as u32; refunds.operator_suggested_refund = 
compute_refund( &self.batch_env, @@ -161,10 +161,7 @@ impl Vm { let result = self.get_hook_params()[0]; let value = self.get_hook_params()[1]; let fp = FatPointer::from(value); - assert_eq!(fp.offset, 0); - - let return_data = self.inner.state.heaps[fp.memory_page] - .read_range_big_endian(fp.start..fp.start + fp.length); + let return_data = self.read_bytes_from_heap(fp); last_tx_result = Some(if result.is_zero() { ExecutionResult::Revert { @@ -190,7 +187,7 @@ impl Vm { } let events = - merge_events(self.inner.world_diff.events(), self.batch_env.number); + merge_events(self.inner.world_diff().events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -276,7 +273,20 @@ impl Vm { /// Should only be used when the bootloader is executing (e.g., when handling hooks). pub(crate) fn read_word_from_bootloader_heap(&self, word: usize) -> U256 { - self.inner.state.heaps[vm2::FIRST_HEAP].read_u256(word as u32 * 32) + let start_address = word as u32 * 32; + self.inner.read_heap_u256(HeapId::FIRST, start_address) + } + + fn read_bytes_from_heap(&self, ptr: FatPointer) -> Vec { + assert_eq!(ptr.offset, 0); + (ptr.start..ptr.start + ptr.length) + .map(|addr| self.inner.read_heap_byte(ptr.memory_page, addr)) + .collect() + } + + pub(crate) fn has_previous_far_calls(&mut self) -> bool { + let callframe_count = self.inner.number_of_callframes(); + (1..callframe_count).any(|i| !self.inner.callframe(i).is_near_call()) } /// Should only be used when the bootloader is executing (e.g., when handling hooks). @@ -284,12 +294,15 @@ impl Vm { &mut self, memory: impl IntoIterator, ) { - assert!(self.inner.state.previous_frames.is_empty()); + assert!( + !self.has_previous_far_calls(), + "Cannot write to bootloader heap when not in root call frame" + ); + for (slot, value) in memory { + let start_address = slot as u32 * 32; self.inner - .state - .heaps - .write_u256(vm2::FIRST_HEAP, slot as u32 * 32, value); + .write_heap_u256(HeapId::FIRST, start_address, value); } } @@ -317,7 +330,7 @@ impl Vm { } else { compress_bytecodes(&tx.factory_deps, |hash| { self.inner - .world_diff + .world_diff() .get_storage_state() .get(&(KNOWN_CODES_STORAGE_ADDRESS, h256_to_u256(hash))) .map(|x| !x.is_zero()) @@ -351,7 +364,7 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff.get_storage_changes().map( + let diffs = self.inner.world_diff().get_storage_changes().map( move |((address, key), (initial_value, final_value))| { let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); StateDiffRecord { @@ -375,11 +388,11 @@ impl Vm { } pub(crate) fn decommitted_hashes(&self) -> impl Iterator + '_ { - self.inner.world_diff.decommitted_hashes() + self.inner.world_diff().decommitted_hashes() } - pub(super) fn gas_remaining(&self) -> u32 { - self.inner.state.current_frame.gas + pub(super) fn gas_remaining(&mut self) -> u32 { + self.inner.current_frame().gas() } } @@ -418,12 +431,13 @@ impl Vm { }, ); - inner.state.current_frame.sp = 0; - + inner.current_frame().set_stack_pointer(0); // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. 
- inner.state.current_frame.heap_size = u32::MAX; - inner.state.current_frame.aux_heap_size = u32::MAX; - inner.state.current_frame.exception_handler = INITIAL_FRAME_FORMAL_EH_LOCATION; + inner.current_frame().set_heap_bound(u32::MAX); + inner.current_frame().set_aux_heap_bound(u32::MAX); + inner + .current_frame() + .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); let mut this = Self { world: World::new(storage, program_cache), @@ -446,7 +460,7 @@ impl Vm { // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { - let world_diff = &self.inner.world_diff; + let world_diff = self.inner.world_diff(); let events = merge_events(world_diff.events(), self.batch_env.number); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -478,7 +492,7 @@ impl Vm { } fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && self.inner.state.previous_frames.is_empty() { + if self.snapshot.is_none() && !self.has_previous_far_calls() { self.inner.delete_history(); } } @@ -504,8 +518,8 @@ impl VmInterface for Vm { } let mut tracer = CircuitsTracer::default(); - let start = self.inner.world_diff.snapshot(); - let pubdata_before = self.inner.world_diff.pubdata(); + let start = self.inner.world_diff().snapshot(); + let pubdata_before = self.inner.world_diff().pubdata(); let gas_before = self.gas_remaining(); let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); @@ -519,7 +533,7 @@ impl VmInterface for Vm { } else { let storage_logs = self .inner - .world_diff + .world_diff() .get_storage_changes_after(&start) .map(|((address, key), change)| StorageLogWithPreviousValue { log: StorageLog { @@ -535,7 +549,7 @@ impl VmInterface for Vm { }) .collect(); let events = merge_events( - self.inner.world_diff.events_after(&start), + self.inner.world_diff().events_after(&start), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -545,7 +559,7 @@ impl VmInterface for Vm { .collect(); let system_l2_to_l1_logs = self .inner - .world_diff + .world_diff() .l2_to_l1_logs_after(&start) .iter() .map(|x| x.glue_into()) @@ -559,7 +573,7 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff.pubdata(); + let pubdata_after = self.inner.world_diff().pubdata(); let circuit_statistic = tracer.circuit_statistic(); let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { @@ -634,7 +648,7 @@ impl VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: vm2::Snapshot, + vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, gas_for_account_validation: u32, } @@ -744,7 +758,7 @@ impl World { } } -impl vm2::StorageInterface for World { +impl zksync_vm2::StorageInterface for World { fn read_storage(&mut self, contract: H160, key: U256) -> Option { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); if self.storage.is_write_initial(key) { @@ -789,7 +803,7 @@ impl vm2::StorageInterface for World { } } -impl vm2::World for World { +impl zksync_vm2::World for World { fn decommit(&mut self, hash: U256) -> Program { self.program_cache .entry(hash) diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 17f27737aa21..d29f0110f217 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -1700,14 +1700,6 @@ dependencies = [ "serde_json", ] -[[package]] -name = "eravm-stable-interface" -version = "0.1.0" -source = 
"git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" -dependencies = [ - "primitive-types", -] - [[package]] name = "errno" version = "0.3.9" @@ -6544,18 +6536,6 @@ dependencies = [ "syn 2.0.66", ] -[[package]] -name = "vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=4ef15d46410ffc11744771a3a6c7c09dd9470c90#4ef15d46410ffc11744771a3a6c7c09dd9470c90" -dependencies = [ - "enum_dispatch", - "eravm-stable-interface", - "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", -] - [[package]] name = "wait-timeout" version = "0.2.0" @@ -7690,7 +7670,6 @@ dependencies = [ "thiserror", "tracing", "vise", - "vm2", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", @@ -7700,6 +7679,7 @@ dependencies = [ "zksync_system_constants", "zksync_types", "zksync_utils", + "zksync_vm2", "zksync_vm_interface", ] @@ -8102,6 +8082,26 @@ dependencies = [ "vise-exporter", ] +[[package]] +name = "zksync_vm2" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "enum_dispatch", + "primitive-types", + "zk_evm_abstractions 0.150.5", + "zkevm_opcode_defs 0.150.5", + "zksync_vm2_interface", +] + +[[package]] +name = "zksync_vm2_interface" +version = "0.1.0" +source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +dependencies = [ + "primitive-types", +] + [[package]] name = "zksync_vm_interface" version = "0.1.0" diff --git a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs index be28f2bd97ee..cfd588c26662 100644 --- a/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/gpu_prover_job_processor.rs @@ -8,8 +8,9 @@ pub mod gpu_prover { ProverContextConfig, }; use tokio::task::JoinHandle; - use zksync_config::configs::fri_prover::SetupLoadMode as SetupLoadModeConfig; - use zksync_config::configs::FriProverConfig; + use zksync_config::configs::{ + fri_prover::SetupLoadMode as SetupLoadModeConfig, FriProverConfig, + }; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, ProverDal}; use zksync_prover_fri_types::{ From 23cdda75ea8b76c0296aa4942d714b0692983426 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Fri, 13 Sep 2024 14:38:53 +0300 Subject: [PATCH 069/116] feat: Add 80 arch to cuda archs (#2871) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add CUDA ARCH 80 to list of CUDA archs. ## Why ❔ To be able to run provers on A100. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- .github/workflows/build-docker-from-tag.yml | 2 +- .github/workflows/release-test-stage.yml | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 791f44117477..29d26a713d89 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -102,7 +102,7 @@ jobs: with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 1da5aa9ac928..ce74b76a6b7c 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [matterlabs-deployer-stage] + runs-on: [ matterlabs-deployer-stage ] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -61,7 +61,7 @@ jobs: build-push-core-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -72,7 +72,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,7 +84,7 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -95,26 +95,26 @@ jobs: build-push-prover-images: name: Build and push images - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [setup, changed_files] + needs: [ setup, changed_files ] uses: ./.github/workflows/build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - CUDA_ARCH: "60;70;75;89" + CUDA_ARCH: "60;70;75;80;89" WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} @@ -122,7 +122,7 @@ jobs: build-gar-prover-fri-gpu: 
name: Build GAR prover FRI GPU - needs: [setup, build-push-prover-images] + needs: [ setup, build-push-prover-images ] uses: ./.github/workflows/build-prover-fri-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: From 32889be93d75d8be99d682531c16e2df968039df Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:31:03 +0300 Subject: [PATCH 070/116] fix: Docker build for compressor (#2874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Pass CUDA_ARCH to compressor image build. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- infrastructure/zk/src/docker.ts | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 27de68d1d98d..a100d1231da6 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -1,4 +1,4 @@ -import { Command } from 'commander'; +import {Command} from 'commander'; import * as utils from 'utils'; const IMAGES = [ @@ -31,7 +31,7 @@ async function dockerCommand( dockerOrg: string = 'matterlabs' ) { // Generating all tags for containers. We need 2 tags here: SHA and SHA+TS - const { stdout: COMMIT_SHORT_SHA }: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); + const {stdout: COMMIT_SHORT_SHA}: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); // COMMIT_SHORT_SHA returns with newline, so we need to trim it const imageTagShaTS: string = process.env.IMAGE_TAG_SUFFIX ? process.env.IMAGE_TAG_SUFFIX @@ -114,7 +114,7 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf if (platform != '') { buildArgs += `--platform=${platform} `; } - if (image === 'prover-gpu-fri') { + if (image === 'prover-gpu-fri' || image == 'proof-fri-gpu-compressor') { const cudaArch = process.env.CUDA_ARCH; buildArgs += `--build-arg CUDA_ARCH='${cudaArch}' `; } @@ -126,6 +126,8 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf } buildArgs += extraArgs; + console.log("Build args: ", buildArgs); + const buildCommand = `DOCKER_BUILDKIT=1 docker buildx build ${tagsToBuild}` + (buildArgs ? 
` ${buildArgs}` : '') + From 4ae5a93e9e96cd0cd529baf9ffa78c1b21a9c4b1 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Fri, 13 Sep 2024 17:39:47 +0300 Subject: [PATCH 071/116] fix(prover): fix setup_metadata_to_setup_data_key (#2875) --- prover/crates/bin/prover_fri/src/utils.rs | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs index 2941c15439a9..181dc857c364 100644 --- a/prover/crates/bin/prover_fri/src/utils.rs +++ b/prover/crates/bin/prover_fri/src/utils.rs @@ -143,9 +143,19 @@ pub fn verify_proof( pub fn setup_metadata_to_setup_data_key( setup_metadata: &CircuitIdRoundTuple, ) -> ProverServiceDataKey { - ProverServiceDataKey { - circuit_id: setup_metadata.circuit_id, - round: setup_metadata.aggregation_round.into(), + let round = setup_metadata.aggregation_round.into(); + match round { + AggregationRound::NodeAggregation => { + // For node aggregation only one key exist for all circuit types + ProverServiceDataKey { + circuit_id: ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + round, + } + } + _ => ProverServiceDataKey { + circuit_id: setup_metadata.circuit_id, + round, + }, } } From b8940394a082c429ce4346d44678722a7405c516 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Fri, 13 Sep 2024 18:49:21 +0200 Subject: [PATCH 072/116] fix(zksync_tee_prover): strip binary for nix package (#2876) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Strip `zksync_tee_prover` binary for nix package. ## Why ❔ Somehow llvm decided to produce different symbol names each build ```diff - 12916: 000000000194d628 48 OBJECT LOCAL HIDDEN 23 anon.c880a573512c356e53163839ee7cd669.742.llvm.17979232152659092167 + 12916: 000000000194d628 48 OBJECT LOCAL HIDDEN 23 anon.c880a573512c356e53163839ee7cd669.742.llvm.14235143304249741099 [...] ``` Remove those to make the builds reproducible again. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Harald Hoyer --- etc/nix/tee_prover.nix | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 50273b91fb5a..0b424522dffb 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -7,4 +7,8 @@ craneLib.buildPackage (commonArgs // { version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; inherit cargoArtifacts; + + postInstall = '' + strip $out/bin/zksync_tee_prover + ''; }) From c957dd8011213e0e95fa5962e2310321b29a0d16 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Mon, 16 Sep 2024 10:58:39 +0300 Subject: [PATCH 073/116] fix(eth_watch): fix `get_events_inner` (#2882) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - `get_events_inner` should recursively call itself - `get_events_inner` should allow passing `None` as topics and/or addresses ## Why ❔ bug fix ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. 
- [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 12 ++++++++ Cargo.toml | 1 + core/node/eth_watch/Cargo.toml | 1 + core/node/eth_watch/src/client.rs | 51 +++++++++++++++++++++---------- 4 files changed, 49 insertions(+), 16 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 59b464f8501d..c47e5b77e391 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -336,6 +336,17 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "async-recursion" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" +dependencies = [ + "proc-macro2 1.0.86", + "quote 1.0.36", + "syn 2.0.72", +] + [[package]] name = "async-signal" version = "0.2.10" @@ -9823,6 +9834,7 @@ name = "zksync_eth_watch" version = "0.1.0" dependencies = [ "anyhow", + "async-recursion", "async-trait", "thiserror", "tokio", diff --git a/Cargo.toml b/Cargo.toml index 5eb862f0bcb7..1e2fb9e0c7aa 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -105,6 +105,7 @@ categories = ["cryptography"] anyhow = "1" assert_matches = "1.5" async-trait = "0.1" +async-recursion = "1" axum = "0.7.5" backon = "0.4.4" bigdecimal = "0.4.5" diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index bbdc4ba27d34..a3d6325f4a24 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -24,6 +24,7 @@ anyhow.workspace = true thiserror.workspace = true async-trait.workspace = true tracing.workspace = true +async-recursion.workspace = true [dev-dependencies] zksync_concurrency.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 67e603041e6c..237c8e5bc2e6 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -100,21 +100,24 @@ impl EthHttpQueryClient { .collect() } + #[async_recursion::async_recursion] async fn get_events_inner( &self, from: BlockNumber, to: BlockNumber, - topics1: Vec, - topics2: Vec, - addresses: Vec
, + topics1: Option>, + topics2: Option>, + addresses: Option>, retries_left: usize, ) -> EnrichedClientResult> { - let filter = FilterBuilder::default() + let mut builder = FilterBuilder::default() .from_block(from) .to_block(to) - .topics(Some(topics1), Some(topics2), None, None) - .address(addresses) - .build(); + .topics(topics1.clone(), topics2.clone(), None, None); + if let Some(addresses) = addresses.clone() { + builder = builder.address(addresses); + } + let filter = builder.build(); let mut result = self.client.logs(&filter).await; // This code is compatible with both Infura and Alchemy API providers. @@ -168,17 +171,33 @@ impl EthHttpQueryClient { tracing::warn!("Splitting block range in half: {from:?} - {mid:?} - {to:?}"); let mut first_half = self - .get_events(from, BlockNumber::Number(mid), RETRY_LIMIT) + .get_events_inner( + from, + BlockNumber::Number(mid), + topics1.clone(), + topics2.clone(), + addresses.clone(), + RETRY_LIMIT, + ) .await?; let mut second_half = self - .get_events(BlockNumber::Number(mid + 1u64), to, RETRY_LIMIT) + .get_events_inner( + BlockNumber::Number(mid + 1u64), + to, + topics1, + topics2, + addresses, + RETRY_LIMIT, + ) .await?; first_half.append(&mut second_half); result = Ok(first_half); } else if should_retry(err_code, err_message) && retries_left > 0 { tracing::warn!("Retrying. Retries left: {retries_left}"); - result = self.get_events(from, to, retries_left - 1).await; + result = self + .get_events_inner(from, to, topics1, topics2, addresses, retries_left - 1) + .await; } } @@ -216,9 +235,9 @@ impl EthClient for EthHttpQueryClient { .get_events_inner( from_block.into(), to_block.into(), - vec![self.new_upgrade_cut_data_signature], - vec![packed_version], - vec![state_transition_manager_address], + Some(vec![self.new_upgrade_cut_data_signature]), + Some(vec![packed_version]), + Some(vec![state_transition_manager_address]), RETRY_LIMIT, ) .await?; @@ -235,9 +254,9 @@ impl EthClient for EthHttpQueryClient { self.get_events_inner( from, to, - self.topics.clone(), - Vec::new(), - self.get_default_address_list(), + Some(self.topics.clone()), + None, + Some(self.get_default_address_list()), retries_left, ) .await From 9ab72005f0d349d607cd28a704893d8fdd4423bd Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 16 Sep 2024 11:27:23 +0300 Subject: [PATCH 074/116] refactor(vm-runner): Improve VM runner / VM playground (#2840) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Various minor improvements to VM runner / VM playground: - Get batch storage asynchronously, so that it works efficiently with snapshot storage. - Add metrics / logs to ensure that snapshot storage works as expected. ## Why ❔ Improves usability. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
--- core/lib/state/src/lib.rs | 1 + core/lib/state/src/storage_factory/metrics.rs | 37 ++++ .../mod.rs} | 184 +++++++----------- .../storage_factory/rocksdb_with_memory.rs | 75 +++++++ .../lib/state/src/storage_factory/snapshot.rs | 49 +++++ core/lib/vm_executor/src/batch/factory.rs | 39 +++- core/lib/vm_executor/src/oneshot/metrics.rs | 77 +------- core/lib/vm_executor/src/oneshot/mod.rs | 2 +- core/lib/vm_executor/src/shared.rs | 81 +++++++- core/lib/vm_interface/src/storage/mod.rs | 2 +- core/lib/vm_interface/src/storage/view.rs | 71 ++++--- core/node/vm_runner/src/impls/bwip.rs | 8 +- core/node/vm_runner/src/impls/playground.rs | 7 +- .../vm_runner/src/impls/protective_reads.rs | 6 +- core/node/vm_runner/src/io.rs | 51 ++++- core/node/vm_runner/src/metrics.rs | 26 ++- core/node/vm_runner/src/output_handler.rs | 6 +- core/node/vm_runner/src/process.rs | 108 +++++----- core/node/vm_runner/src/storage.rs | 8 +- core/node/vm_runner/src/tests/mod.rs | 4 +- core/node/vm_runner/src/tests/process.rs | 4 +- .../vm_runner/src/tests/storage_writer.rs | 10 +- 22 files changed, 552 insertions(+), 304 deletions(-) create mode 100644 core/lib/state/src/storage_factory/metrics.rs rename core/lib/state/src/{storage_factory.rs => storage_factory/mod.rs} (80%) create mode 100644 core/lib/state/src/storage_factory/rocksdb_with_memory.rs create mode 100644 core/lib/state/src/storage_factory/snapshot.rs diff --git a/core/lib/state/src/lib.rs b/core/lib/state/src/lib.rs index 205579552a30..fa06599357ca 100644 --- a/core/lib/state/src/lib.rs +++ b/core/lib/state/src/lib.rs @@ -21,6 +21,7 @@ pub use self::{ shadow_storage::ShadowStorage, storage_factory::{ BatchDiff, CommonStorage, OwnedStorage, ReadStorageFactory, RocksdbWithMemory, + SnapshotStorage, }, }; diff --git a/core/lib/state/src/storage_factory/metrics.rs b/core/lib/state/src/storage_factory/metrics.rs new file mode 100644 index 000000000000..822db90820ce --- /dev/null +++ b/core/lib/state/src/storage_factory/metrics.rs @@ -0,0 +1,37 @@ +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics, Unit}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "stage", rename_all = "snake_case")] +pub(super) enum SnapshotStage { + BatchHeader, + ProtectiveReads, + TouchedSlots, + PreviousValues, + InitialWrites, + Bytecodes, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "kind", rename_all = "snake_case")] +pub(super) enum AccessKind { + ReadValue, + IsWriteInitial, + LoadFactoryDep, + GetEnumerationIndex, +} + +#[derive(Debug, Metrics)] +#[metrics(prefix = "state_snapshot")] +pub(super) struct SnapshotMetrics { + /// Latency of loading a batch snapshot split by stage. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub load_latency: Family>, + /// Latency of accessing the fallback storage for a batch snapshot. 
+ #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub fallback_access_latency: Family>, +} + +#[vise::register] +pub(super) static SNAPSHOT_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/state/src/storage_factory.rs b/core/lib/state/src/storage_factory/mod.rs similarity index 80% rename from core/lib/state/src/storage_factory.rs rename to core/lib/state/src/storage_factory/mod.rs index 2ef9b249af2e..0b514f8f9644 100644 --- a/core/lib/state/src/storage_factory.rs +++ b/core/lib/state/src/storage_factory/mod.rs @@ -1,7 +1,4 @@ -use std::{ - collections::{HashMap, HashSet}, - fmt::Debug, -}; +use std::{collections::HashSet, fmt}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,64 +7,18 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_storage::RocksDB; use zksync_types::{L1BatchNumber, StorageKey, StorageValue, H256}; use zksync_utils::u256_to_h256; -use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot, StorageWithSnapshot}; +use zksync_vm_interface::storage::{ReadStorage, StorageSnapshot}; +use self::metrics::{SnapshotStage, SNAPSHOT_METRICS}; +pub use self::{ + rocksdb_with_memory::{BatchDiff, RocksdbWithMemory}, + snapshot::SnapshotStorage, +}; use crate::{PostgresStorage, RocksdbStorage, RocksdbStorageBuilder, StateKeeperColumnFamily}; -/// Storage with a static lifetime that can be sent to Tokio tasks etc. -pub type OwnedStorage = CommonStorage<'static>; - -/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param -/// (mostly for testing purposes); the default is [`OwnedStorage`]. -#[async_trait] -pub trait ReadStorageFactory: Debug + Send + Sync + 'static { - /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance. - /// The specific criteria on which one are left up to the implementation. - /// - /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives - /// a stop signal; this is the only case in which `Ok(None)` should be returned. - async fn access_storage( - &self, - stop_receiver: &watch::Receiver, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result>; -} - -/// [`ReadStorageFactory`] producing Postgres-backed storage instances. Hence, it is slower than more advanced -/// alternatives with RocksDB caches and should be used sparingly (e.g., for testing). -#[async_trait] -impl ReadStorageFactory for ConnectionPool { - async fn access_storage( - &self, - _stop_receiver: &watch::Receiver, - l1_batch_number: L1BatchNumber, - ) -> anyhow::Result> { - let connection = self.connection().await?; - let storage = OwnedStorage::postgres(connection, l1_batch_number).await?; - Ok(Some(storage.into())) - } -} - -/// DB difference introduced by one batch. -#[derive(Debug, Clone)] -pub struct BatchDiff { - /// Storage slots touched by this batch along with new values there. - pub state_diff: HashMap, - /// Initial write indices introduced by this batch. - pub enum_index_diff: HashMap, - /// Factory dependencies introduced by this batch. - pub factory_dep_diff: HashMap>, -} - -/// A RocksDB cache instance with in-memory DB diffs that gives access to DB state at batches `N` to -/// `N + K`, where `K` is the number of diffs. -#[derive(Debug)] -pub struct RocksdbWithMemory { - /// RocksDB cache instance caught up to batch `N`. - pub rocksdb: RocksdbStorage, - /// Diffs for batches `N + 1` to `N + K`. 
- pub batch_diffs: Vec, -} +mod metrics; +mod rocksdb_with_memory; +mod snapshot; /// Union of all [`ReadStorage`] implementations that are returned by [`ReadStorageFactory`], such as /// Postgres- and RocksDB-backed storages. @@ -83,7 +34,7 @@ pub enum CommonStorage<'a> { /// Implementation over a RocksDB cache instance with in-memory DB diffs. RocksdbWithMemory(RocksdbWithMemory), /// In-memory storage snapshot with the Postgres storage fallback. - Snapshot(StorageWithSnapshot>), + Snapshot(SnapshotStorage<'a>), /// Generic implementation. Should be used for testing purposes only since it has performance penalty because /// of the dynamic dispatch. Boxed(Box), @@ -176,6 +127,7 @@ impl CommonStorage<'static> { connection: &mut Connection<'static, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::BatchHeader].start(); let Some(header) = connection .blocks_dal() .get_l1_batch_header(l1_batch_number) @@ -188,8 +140,10 @@ impl CommonStorage<'static> { .into_iter() .map(u256_to_h256) .collect(); + latency.observe(); // Check protective reads early on. + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::ProtectiveReads].start(); let protective_reads = connection .storage_logs_dedup_dal() .get_protective_reads_for_l1_batch(l1_batch_number) @@ -199,14 +153,18 @@ impl CommonStorage<'static> { return Ok(None); } let protective_reads_len = protective_reads.len(); - tracing::debug!("Loaded {protective_reads_len} protective reads"); + let latency = latency.observe(); + tracing::debug!("Loaded {protective_reads_len} protective reads in {latency:?}"); + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::TouchedSlots].start(); let touched_slots = connection .storage_logs_dal() .get_touched_slots_for_l1_batch(l1_batch_number) .await?; - tracing::debug!("Loaded {} touched keys", touched_slots.len()); + let latency = latency.observe(); + tracing::debug!("Loaded {} touched keys in {latency:?}", touched_slots.len()); + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::PreviousValues].start(); let all_accessed_keys: Vec<_> = protective_reads .into_iter() .map(|key| key.hashed_key()) @@ -216,21 +174,31 @@ impl CommonStorage<'static> { .storage_logs_dal() .get_previous_storage_values(&all_accessed_keys, l1_batch_number) .await?; + let latency = latency.observe(); tracing::debug!( - "Obtained {} previous values for accessed keys", + "Obtained {} previous values for accessed keys in {latency:?}", previous_values.len() ); + + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::InitialWrites].start(); let initial_write_info = connection .storage_logs_dal() .get_l1_batches_and_indices_for_initial_writes(&all_accessed_keys) .await?; - tracing::debug!("Obtained initial write info for accessed keys"); + let latency = latency.observe(); + tracing::debug!("Obtained initial write info for accessed keys in {latency:?}"); + let latency = SNAPSHOT_METRICS.load_latency[&SnapshotStage::Bytecodes].start(); let bytecodes = connection .factory_deps_dal() .get_factory_deps(&bytecode_hashes) .await; - tracing::debug!("Loaded {} bytecodes used in the batch", bytecodes.len()); + let latency = latency.observe(); + tracing::debug!( + "Loaded {} bytecodes used in the batch in {latency:?}", + bytecodes.len() + ); + let factory_deps = bytecodes .into_iter() .map(|(hash_u256, words)| { @@ -256,54 +224,6 @@ impl CommonStorage<'static> { } } -impl ReadStorage for RocksdbWithMemory { - fn read_value(&mut self, key: &StorageKey) -> 
StorageValue { - let hashed_key = key.hashed_key(); - match self - .batch_diffs - .iter() - .rev() - .find_map(|b| b.state_diff.get(&hashed_key)) - { - None => self.rocksdb.read_value(key), - Some(value) => *value, - } - } - - fn is_write_initial(&mut self, key: &StorageKey) -> bool { - match self - .batch_diffs - .iter() - .find_map(|b| b.enum_index_diff.get(&key.hashed_key())) - { - None => self.rocksdb.is_write_initial(key), - Some(_) => false, - } - } - - fn load_factory_dep(&mut self, hash: H256) -> Option> { - match self - .batch_diffs - .iter() - .find_map(|b| b.factory_dep_diff.get(&hash)) - { - None => self.rocksdb.load_factory_dep(hash), - Some(value) => Some(value.clone()), - } - } - - fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { - match self - .batch_diffs - .iter() - .find_map(|b| b.enum_index_diff.get(&key.hashed_key())) - { - None => self.rocksdb.get_enumeration_index(key), - Some(value) => Some(*value), - } - } -} - impl ReadStorage for CommonStorage<'_> { fn read_value(&mut self, key: &StorageKey) -> StorageValue { match self { @@ -358,8 +278,42 @@ impl From for CommonStorage<'_> { } } -impl<'a> From>> for CommonStorage<'a> { - fn from(value: StorageWithSnapshot>) -> Self { +impl<'a> From> for CommonStorage<'a> { + fn from(value: SnapshotStorage<'a>) -> Self { Self::Snapshot(value) } } + +/// Storage with a static lifetime that can be sent to Tokio tasks etc. +pub type OwnedStorage = CommonStorage<'static>; + +/// Factory that can produce storage instances on demand. The storage type is encapsulated as a type param +/// (mostly for testing purposes); the default is [`OwnedStorage`]. +#[async_trait] +pub trait ReadStorageFactory: fmt::Debug + Send + Sync + 'static { + /// Creates a storage instance, e.g. over a Postgres connection or a RocksDB instance. + /// The specific criteria on which one are left up to the implementation. + /// + /// Implementations may be cancel-aware and return `Ok(None)` iff `stop_receiver` receives + /// a stop signal; this is the only case in which `Ok(None)` should be returned. + async fn access_storage( + &self, + stop_receiver: &watch::Receiver, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result>; +} + +/// [`ReadStorageFactory`] producing Postgres-backed storage instances. Hence, it is slower than more advanced +/// alternatives with RocksDB caches and should be used sparingly (e.g., for testing). +#[async_trait] +impl ReadStorageFactory for ConnectionPool { + async fn access_storage( + &self, + _stop_receiver: &watch::Receiver, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result> { + let connection = self.connection().await?; + let storage = OwnedStorage::postgres(connection, l1_batch_number).await?; + Ok(Some(storage.into())) + } +} diff --git a/core/lib/state/src/storage_factory/rocksdb_with_memory.rs b/core/lib/state/src/storage_factory/rocksdb_with_memory.rs new file mode 100644 index 000000000000..411460dad18e --- /dev/null +++ b/core/lib/state/src/storage_factory/rocksdb_with_memory.rs @@ -0,0 +1,75 @@ +use std::collections::HashMap; + +use zksync_types::{StorageKey, StorageValue, H256}; +use zksync_vm_interface::storage::ReadStorage; + +use crate::RocksdbStorage; + +/// DB difference introduced by one batch. +#[derive(Debug, Clone)] +pub struct BatchDiff { + /// Storage slots touched by this batch along with new values there. + pub state_diff: HashMap, + /// Initial write indices introduced by this batch. + pub enum_index_diff: HashMap, + /// Factory dependencies introduced by this batch. 
+ pub factory_dep_diff: HashMap>, +} + +/// A RocksDB cache instance with in-memory DB diffs that gives access to DB state at batches `N` to +/// `N + K`, where `K` is the number of diffs. +#[derive(Debug)] +pub struct RocksdbWithMemory { + /// RocksDB cache instance caught up to batch `N`. + pub rocksdb: RocksdbStorage, + /// Diffs for batches `N + 1` to `N + K`. + pub batch_diffs: Vec, +} + +impl ReadStorage for RocksdbWithMemory { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let hashed_key = key.hashed_key(); + match self + .batch_diffs + .iter() + .rev() + .find_map(|b| b.state_diff.get(&hashed_key)) + { + None => self.rocksdb.read_value(key), + Some(value) => *value, + } + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + match self + .batch_diffs + .iter() + .find_map(|b| b.enum_index_diff.get(&key.hashed_key())) + { + None => self.rocksdb.is_write_initial(key), + Some(_) => false, + } + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + match self + .batch_diffs + .iter() + .find_map(|b| b.factory_dep_diff.get(&hash)) + { + None => self.rocksdb.load_factory_dep(hash), + Some(value) => Some(value.clone()), + } + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + match self + .batch_diffs + .iter() + .find_map(|b| b.enum_index_diff.get(&key.hashed_key())) + { + None => self.rocksdb.get_enumeration_index(key), + Some(value) => Some(*value), + } + } +} diff --git a/core/lib/state/src/storage_factory/snapshot.rs b/core/lib/state/src/storage_factory/snapshot.rs new file mode 100644 index 000000000000..05a79125dd30 --- /dev/null +++ b/core/lib/state/src/storage_factory/snapshot.rs @@ -0,0 +1,49 @@ +use zksync_types::{StorageKey, StorageValue, H256}; +use zksync_vm_interface::storage::StorageWithSnapshot; + +use super::metrics::{AccessKind, SNAPSHOT_METRICS}; +use crate::{interface::ReadStorage, PostgresStorage}; + +/// Wrapper around [`PostgresStorage`] used to track frequency of fallback access. +#[derive(Debug)] +pub struct FallbackStorage<'a>(PostgresStorage<'a>); + +impl<'a> From> for FallbackStorage<'a> { + fn from(storage: PostgresStorage<'a>) -> Self { + Self(storage) + } +} + +impl ReadStorage for FallbackStorage<'_> { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::ReadValue].start(); + let output = self.0.read_value(key); + latency.observe(); + output + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::IsWriteInitial].start(); + let output = self.0.is_write_initial(key); + latency.observe(); + output + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + let latency = SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::LoadFactoryDep].start(); + let output = self.0.load_factory_dep(hash); + latency.observe(); + output + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + let latency = + SNAPSHOT_METRICS.fallback_access_latency[&AccessKind::GetEnumerationIndex].start(); + let output = self.0.get_enumeration_index(key); + latency.observe(); + output + } +} + +/// Snapshot-backed storage used for batch processing. 
+pub type SnapshotStorage<'a> = StorageWithSnapshot>; diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index 68a3769ee622..d6f7555b7672 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, rc::Rc, sync::Arc}; +use std::{marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; use anyhow::Context as _; use once_cell::sync::OnceCell; @@ -6,7 +6,7 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, - storage::{ReadStorage, StorageView}, + storage::{ReadStorage, StorageView, StorageViewStats}, BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, }, @@ -20,7 +20,7 @@ use super::{ executor::{Command, MainBatchExecutor}, metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, }; -use crate::shared::InteractionType; +use crate::shared::{InteractionType, STORAGE_METRICS}; /// The default implementation of [`BatchExecutorFactory`]. /// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). @@ -35,6 +35,7 @@ pub struct MainBatchExecutorFactory { /// regardless of its configuration, this flag should be set to `true`. optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, + observe_storage_metrics: bool, } impl MainBatchExecutorFactory { @@ -43,9 +44,11 @@ impl MainBatchExecutorFactory { save_call_traces, optional_bytecode_compression, fast_vm_mode: FastVmMode::Old, + observe_storage_metrics: false, } } + /// Sets the fast VM mode used by this executor. pub fn set_fast_vm_mode(&mut self, fast_vm_mode: FastVmMode) { if !matches!(fast_vm_mode, FastVmMode::Old) { tracing::warn!( @@ -54,6 +57,13 @@ impl MainBatchExecutorFactory { } self.fast_vm_mode = fast_vm_mode; } + + /// Enables storage metrics reporting for this executor. Storage metrics will be reported for each transaction. + // The reason this isn't on by default is that storage metrics don't distinguish between "batch-executed" and "oneshot-executed" transactions; + // this optimally needs some improvements in `vise` (ability to add labels for groups of metrics). 
+ pub fn observe_storage_metrics(&mut self) { + self.observe_storage_metrics = true; + } } impl BatchExecutorFactory for MainBatchExecutorFactory { @@ -70,6 +80,7 @@ impl BatchExecutorFactory for MainBatchExecu save_call_traces: self.save_call_traces, optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, + observe_storage_metrics: self.observe_storage_metrics, commands: commands_receiver, _storage: PhantomData, }; @@ -91,6 +102,7 @@ struct CommandReceiver { save_call_traces: bool, optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, + observe_storage_metrics: bool, commands: mpsc::Receiver, _storage: PhantomData, } @@ -112,14 +124,22 @@ impl CommandReceiver { self.fast_vm_mode, ); let mut batch_finished = false; + let mut prev_storage_stats = StorageViewStats::default(); while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { let tx_hash = tx.hash(); - let result = self.execute_tx(*tx, &mut vm).with_context(|| { + let (result, latency) = self.execute_tx(*tx, &mut vm).with_context(|| { format!("fatal error executing transaction {tx_hash:?}") })?; + + if self.observe_storage_metrics { + let storage_stats = storage_view.borrow().stats(); + let stats_diff = storage_stats.saturating_sub(&prev_storage_stats); + STORAGE_METRICS.observe(&format!("Tx {tx_hash:?}"), latency, &stats_diff); + prev_storage_stats = storage_stats; + } if resp.send(result).is_err() { break; } @@ -152,11 +172,11 @@ impl CommandReceiver { .context("storage view leaked")? .into_inner(); if batch_finished { - let metrics = storage_view.metrics(); + let stats = storage_view.stats(); EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::GetValue] - .observe(metrics.time_spent_on_get_value); + .observe(stats.time_spent_on_get_value); EXECUTOR_METRICS.batch_storage_interaction_duration[&InteractionType::SetValue] - .observe(metrics.time_spent_on_set_value); + .observe(stats.time_spent_on_set_value); } else { // State keeper can exit because of stop signal, so it's OK to exit mid-batch. tracing::info!("State keeper exited with an unfinished L1 batch"); @@ -168,7 +188,7 @@ impl CommandReceiver { &self, transaction: Transaction, vm: &mut VmInstance, - ) -> anyhow::Result { + ) -> anyhow::Result<(BatchTransactionExecutionResult, Duration)> { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). vm.pop_snapshot_no_rollback(); @@ -182,9 +202,8 @@ impl CommandReceiver { } else { self.execute_tx_in_vm(&transaction, vm)? 
}; - latency.observe(); - Ok(result) + Ok((result, latency.observe())) } fn rollback_last_tx(&self, vm: &mut VmInstance) { diff --git a/core/lib/vm_executor/src/oneshot/metrics.rs b/core/lib/vm_executor/src/oneshot/metrics.rs index 8a89ce0a9a4f..475463300f16 100644 --- a/core/lib/vm_executor/src/oneshot/metrics.rs +++ b/core/lib/vm_executor/src/oneshot/metrics.rs @@ -1,9 +1,9 @@ use std::time::Duration; use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; -use zksync_multivm::interface::{storage::StorageViewMetrics, VmMemoryMetrics}; +use zksync_multivm::interface::{storage::StorageViewStats, VmMemoryMetrics}; -use crate::shared::InteractionType; +use crate::shared::STORAGE_METRICS; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "type", rename_all = "snake_case")] @@ -46,29 +46,11 @@ struct RuntimeContextMemoryMetrics { #[vise::register] static MEMORY_METRICS: vise::Global = vise::Global::new(); -const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); - -#[derive(Debug, Metrics)] -#[metrics(prefix = "runtime_context_storage_interaction")] -struct RuntimeContextStorageMetrics { - #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] - amount: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration: Family>, - #[metrics(buckets = Buckets::LATENCIES)] - duration_per_unit: Family>, - #[metrics(buckets = Buckets::ZERO_TO_ONE)] - ratio: Histogram, -} - -#[vise::register] -static STORAGE_METRICS: vise::Global = vise::Global::new(); - pub(super) fn report_vm_memory_metrics( tx_id: &str, memory_metrics: &VmMemoryMetrics, vm_execution_took: Duration, - storage_metrics: StorageViewMetrics, + storage_metrics: &StorageViewStats, ) { MEMORY_METRICS.event_sink_size[&SizeType::Inner].observe(memory_metrics.event_sink_inner); MEMORY_METRICS.event_sink_size[&SizeType::History].observe(memory_metrics.event_sink_history); @@ -88,56 +70,5 @@ pub(super) fn report_vm_memory_metrics( .full .observe(memory_metrics.full_size() + storage_metrics.cache_size); - let total_storage_invocations = storage_metrics.get_value_storage_invocations - + storage_metrics.set_value_storage_invocations; - let total_time_spent_in_storage = - storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; - - STORAGE_METRICS.amount[&InteractionType::Missed] - .observe(storage_metrics.storage_invocations_missed); - STORAGE_METRICS.amount[&InteractionType::GetValue] - .observe(storage_metrics.get_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::SetValue] - .observe(storage_metrics.set_value_storage_invocations); - STORAGE_METRICS.amount[&InteractionType::Total].observe(total_storage_invocations); - - STORAGE_METRICS.duration[&InteractionType::Missed] - .observe(storage_metrics.time_spent_on_storage_missed); - STORAGE_METRICS.duration[&InteractionType::GetValue] - .observe(storage_metrics.time_spent_on_get_value); - STORAGE_METRICS.duration[&InteractionType::SetValue] - .observe(storage_metrics.time_spent_on_set_value); - STORAGE_METRICS.duration[&InteractionType::Total].observe(total_time_spent_in_storage); - - if total_storage_invocations > 0 { - STORAGE_METRICS.duration_per_unit[&InteractionType::Total] - .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); - } - if storage_metrics.storage_invocations_missed > 0 { - let duration_per_unit = storage_metrics - .time_spent_on_storage_missed - .div_f64(storage_metrics.storage_invocations_missed as 
f64); - STORAGE_METRICS.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); - } - - STORAGE_METRICS - .ratio - .observe(total_time_spent_in_storage.as_secs_f64() / vm_execution_took.as_secs_f64()); - - const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; - - if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { - tracing::info!( - "Tx {tx_id} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ - {} get_value_storage_invocations, {} set_value_storage_invocations, \ - vm execution took {vm_execution_took:?}, storage interaction took {total_time_spent_in_storage:?} \ - (missed: {:?} get: {:?} set: {:?})", - storage_metrics.storage_invocations_missed, - storage_metrics.get_value_storage_invocations, - storage_metrics.set_value_storage_invocations, - storage_metrics.time_spent_on_storage_missed, - storage_metrics.time_spent_on_get_value, - storage_metrics.time_spent_on_set_value, - ); - } + STORAGE_METRICS.observe(&format!("Tx {tx_id}"), vm_execution_took, storage_metrics); } diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cac8edfdfdf8..1838381d2a01 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -284,7 +284,7 @@ impl VmSandbox { &tx_id, &memory_metrics, vm_execution_took, - self.storage_view.as_ref().borrow_mut().metrics(), + &self.storage_view.borrow().stats(), ); result } diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs index 420005be05d6..8ac4dce2e01e 100644 --- a/core/lib/vm_executor/src/shared.rs +++ b/core/lib/vm_executor/src/shared.rs @@ -1,6 +1,9 @@ //! Functionality shared among different types of executors. -use vise::{EncodeLabelSet, EncodeLabelValue}; +use std::time::Duration; + +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; +use zksync_multivm::interface::storage::StorageViewStats; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "interaction", rename_all = "snake_case")] @@ -10,3 +13,79 @@ pub(crate) enum InteractionType { SetValue, Total, } + +const INTERACTION_AMOUNT_BUCKETS: Buckets = Buckets::exponential(10.0..=10_000_000.0, 10.0); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "runtime_context_storage_interaction")] +pub(crate) struct RuntimeContextStorageMetrics { + #[metrics(buckets = INTERACTION_AMOUNT_BUCKETS)] + amount: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration: Family>, + #[metrics(buckets = Buckets::LATENCIES)] + duration_per_unit: Family>, + #[metrics(buckets = Buckets::ZERO_TO_ONE)] + ratio: Histogram, +} + +impl RuntimeContextStorageMetrics { + pub fn observe( + &self, + op: &str, + total_vm_latency: Duration, + storage_metrics: &StorageViewStats, + ) { + const STORAGE_INVOCATIONS_DEBUG_THRESHOLD: usize = 1_000; + + let total_storage_invocations = storage_metrics.get_value_storage_invocations + + storage_metrics.set_value_storage_invocations; + let total_time_spent_in_storage = + storage_metrics.time_spent_on_get_value + storage_metrics.time_spent_on_set_value; + + self.amount[&InteractionType::Missed].observe(storage_metrics.storage_invocations_missed); + self.amount[&InteractionType::GetValue] + .observe(storage_metrics.get_value_storage_invocations); + self.amount[&InteractionType::SetValue] + .observe(storage_metrics.set_value_storage_invocations); + self.amount[&InteractionType::Total].observe(total_storage_invocations); + + 
self.duration[&InteractionType::Missed] + .observe(storage_metrics.time_spent_on_storage_missed); + self.duration[&InteractionType::GetValue].observe(storage_metrics.time_spent_on_get_value); + self.duration[&InteractionType::SetValue].observe(storage_metrics.time_spent_on_set_value); + self.duration[&InteractionType::Total].observe(total_time_spent_in_storage); + + if total_storage_invocations > 0 { + self.duration_per_unit[&InteractionType::Total] + .observe(total_time_spent_in_storage.div_f64(total_storage_invocations as f64)); + } + if storage_metrics.storage_invocations_missed > 0 { + let duration_per_unit = storage_metrics + .time_spent_on_storage_missed + .div_f64(storage_metrics.storage_invocations_missed as f64); + self.duration_per_unit[&InteractionType::Missed].observe(duration_per_unit); + } + + self.ratio + .observe(total_time_spent_in_storage.as_secs_f64() / total_vm_latency.as_secs_f64()); + + if total_storage_invocations > STORAGE_INVOCATIONS_DEBUG_THRESHOLD { + tracing::info!( + "{op} resulted in {total_storage_invocations} storage_invocations, {} new_storage_invocations, \ + {} get_value_storage_invocations, {} set_value_storage_invocations, \ + vm execution took {total_vm_latency:?}, storage interaction took {total_time_spent_in_storage:?} \ + (missed: {:?} get: {:?} set: {:?})", + storage_metrics.storage_invocations_missed, + storage_metrics.get_value_storage_invocations, + storage_metrics.set_value_storage_invocations, + storage_metrics.time_spent_on_storage_missed, + storage_metrics.time_spent_on_get_value, + storage_metrics.time_spent_on_set_value, + ); + } + } +} + +#[vise::register] +pub(crate) static STORAGE_METRICS: vise::Global = vise::Global::new(); diff --git a/core/lib/vm_interface/src/storage/mod.rs b/core/lib/vm_interface/src/storage/mod.rs index 9b92ef8b7705..6cdcd33db682 100644 --- a/core/lib/vm_interface/src/storage/mod.rs +++ b/core/lib/vm_interface/src/storage/mod.rs @@ -6,7 +6,7 @@ pub use self::{ // Note, that `test_infra` of the bootloader tests relies on this value to be exposed in_memory::{InMemoryStorage, IN_MEMORY_STORAGE_DEFAULT_NETWORK_ID}, snapshot::{StorageSnapshot, StorageWithSnapshot}, - view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewMetrics}, + view::{ImmutableStorageView, StorageView, StorageViewCache, StorageViewStats}, }; mod in_memory; diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index 101f5c82f497..ec9267609e23 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -10,9 +10,9 @@ use zksync_types::{StorageKey, StorageValue, H256}; use super::{ReadStorage, StoragePtr, WriteStorage}; -/// Metrics for [`StorageView`]. +/// Statistics for [`StorageView`]. #[derive(Debug, Default, Clone, Copy)] -pub struct StorageViewMetrics { +pub struct StorageViewStats { /// Estimated byte size of the cache used by the `StorageView`. pub cache_size: usize, /// Number of read / write ops for which the value was read from the underlying storage. @@ -29,6 +29,33 @@ pub struct StorageViewMetrics { pub time_spent_on_set_value: Duration, } +impl StorageViewStats { + /// Subtracts two sets of statistics. This can be used to measure increment between these stats and older stats for the same VM. 
+ pub fn saturating_sub(&self, older: &Self) -> Self { + Self { + cache_size: self.cache_size.saturating_sub(older.cache_size), + storage_invocations_missed: self + .storage_invocations_missed + .saturating_sub(older.storage_invocations_missed), + get_value_storage_invocations: self + .get_value_storage_invocations + .saturating_sub(older.get_value_storage_invocations), + set_value_storage_invocations: self + .set_value_storage_invocations + .saturating_sub(older.set_value_storage_invocations), + time_spent_on_storage_missed: self + .time_spent_on_storage_missed + .saturating_sub(older.time_spent_on_storage_missed), + time_spent_on_get_value: self + .time_spent_on_get_value + .saturating_sub(older.time_spent_on_get_value), + time_spent_on_set_value: self + .time_spent_on_set_value + .saturating_sub(older.time_spent_on_set_value), + } + } +} + /// `StorageView` is a buffer for `StorageLog`s between storage and transaction execution code. /// In order to commit transactions logs should be submitted to the underlying storage /// after a transaction is executed. @@ -46,7 +73,7 @@ pub struct StorageView { // Used for caching and to get the list/count of modified keys modified_storage_keys: HashMap, cache: StorageViewCache, - metrics: StorageViewMetrics, + stats: StorageViewStats, } /// `StorageViewCache` is a struct for caching storage reads and `contains_key()` checks. @@ -112,7 +139,7 @@ impl StorageView { read_storage_keys: HashMap::new(), initial_writes: HashMap::new(), }, - metrics: StorageViewMetrics::default(), + stats: StorageViewStats::default(), } } @@ -126,8 +153,8 @@ impl StorageView { cached_value.copied().unwrap_or_else(|| { let value = self.storage_handle.read_value(key); self.cache.read_storage_keys.insert(*key, value); - self.metrics.time_spent_on_storage_missed += started_at.elapsed(); - self.metrics.storage_invocations_missed += 1; + self.stats.time_spent_on_storage_missed += started_at.elapsed(); + self.stats.storage_invocations_missed += 1; value }) } @@ -138,11 +165,11 @@ impl StorageView { + self.cache.read_storage_keys.len() * mem::size_of::<(StorageKey, StorageValue)>() } - /// Returns the current metrics. - pub fn metrics(&self) -> StorageViewMetrics { - StorageViewMetrics { + /// Returns the current storage access stats. 
+ pub fn stats(&self) -> StorageViewStats { + StorageViewStats { cache_size: self.cache_size(), - ..self.metrics + ..self.stats } } @@ -155,7 +182,7 @@ impl StorageView { impl ReadStorage for StorageView { fn read_value(&mut self, key: &StorageKey) -> StorageValue { let started_at = Instant::now(); - self.metrics.get_value_storage_invocations += 1; + self.stats.get_value_storage_invocations += 1; let value = self.get_value_no_log(key); tracing::trace!( @@ -166,7 +193,7 @@ impl ReadStorage for StorageView { key.key() ); - self.metrics.time_spent_on_get_value += started_at.elapsed(); + self.stats.time_spent_on_get_value += started_at.elapsed(); value } @@ -198,7 +225,7 @@ impl WriteStorage for StorageView { fn set_value(&mut self, key: StorageKey, value: StorageValue) -> StorageValue { let started_at = Instant::now(); - self.metrics.set_value_storage_invocations += 1; + self.stats.set_value_storage_invocations += 1; let original = self.get_value_no_log(&key); tracing::trace!( @@ -210,7 +237,7 @@ impl WriteStorage for StorageView { key.key() ); self.modified_storage_keys.insert(key, value); - self.metrics.time_spent_on_set_value += started_at.elapsed(); + self.stats.time_spent_on_set_value += started_at.elapsed(); original } @@ -220,7 +247,7 @@ impl WriteStorage for StorageView { } fn missed_storage_invocations(&self) -> usize { - self.metrics.storage_invocations_missed + self.stats.storage_invocations_missed } } @@ -245,8 +272,8 @@ impl ReadStorage for ImmutableStorageView { cached_value.copied().unwrap_or_else(|| { let value = this.storage_handle.read_value(key); this.cache.read_storage_keys.insert(*key, value); - this.metrics.time_spent_on_storage_missed += started_at.elapsed(); - this.metrics.storage_invocations_missed += 1; + this.stats.time_spent_on_storage_missed += started_at.elapsed(); + this.stats.storage_invocations_missed += 1; value }) } @@ -289,7 +316,7 @@ mod test { assert_eq!(storage_view.read_value(&key), value); assert!(storage_view.is_write_initial(&key)); // key was inserted during the view lifetime - assert_eq!(storage_view.metrics().storage_invocations_missed, 1); + assert_eq!(storage_view.stats().storage_invocations_missed, 1); // ^ We should only read a value at `key` once, and then used the cached value. 
raw_storage.set_value(key, value); @@ -307,10 +334,10 @@ mod test { assert_eq!(storage_view.read_value(&new_key), new_value); assert!(storage_view.is_write_initial(&new_key)); - let metrics = storage_view.metrics(); - assert_eq!(metrics.storage_invocations_missed, 2); - assert_eq!(metrics.get_value_storage_invocations, 3); - assert_eq!(metrics.set_value_storage_invocations, 2); + let stats = storage_view.stats(); + assert_eq!(stats.storage_invocations_missed, 2); + assert_eq!(stats.get_value_storage_invocations, 3); + assert_eq!(stats.set_value_storage_invocations, 2); } #[test] diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index f23f63533ff5..6c2933635b4c 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -52,9 +52,9 @@ impl BasicWitnessInputProducer { ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); let vm_runner = VmRunner::new( pool, - Box::new(io), + Arc::new(io), Arc::new(loader), - Box::new(output_handler_factory), + Arc::new(output_handler_factory), batch_executor_factory, ); Ok(( @@ -168,7 +168,7 @@ impl OutputHandler for BasicWitnessInputProducerOutputHandler { )] async fn handle_l1_batch(self: Box, output: Arc) -> anyhow::Result<()> { let l1_batch_number = self.l1_batch_number; - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("bwip").await?; tracing::info!(%l1_batch_number, "Started saving VM run data"); @@ -381,7 +381,7 @@ struct BasicWitnessInputProducerOutputHandlerFactory { #[async_trait] impl OutputHandlerFactory for BasicWitnessInputProducerOutputHandlerFactory { async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index 091fa15fc953..dc21d5a32036 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -129,6 +129,7 @@ impl VmPlayground { let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false); batch_executor_factory.set_fast_vm_mode(vm_mode); + batch_executor_factory.observe_storage_metrics(); let io = VmPlaygroundIo { cursor_file_path, @@ -246,9 +247,9 @@ impl VmPlayground { }; let vm_runner = VmRunner::new( self.pool, - Box::new(self.io), + Arc::new(self.io), loader, - Box::new(self.output_handler_factory), + Arc::new(self.output_handler_factory), Box::new(self.batch_executor_factory), ); vm_runner.run(&stop_receiver).await @@ -412,7 +413,7 @@ impl OutputHandler for VmPlaygroundOutputHandler { #[async_trait] impl OutputHandlerFactory for VmPlaygroundOutputHandler { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, _l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index b620675b78e2..b1aff9fe3825 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -41,9 +41,9 @@ impl ProtectiveReadsWriter { let batch_processor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, - Box::new(io), + Arc::new(io), Arc::new(loader), - Box::new(output_handler_factory), + Arc::new(output_handler_factory), Box::new(batch_processor), ); Ok(( @@ -219,7 +219,7 @@ struct ProtectiveReadsOutputHandlerFactory { #[async_trait] impl 
OutputHandlerFactory for ProtectiveReadsOutputHandlerFactory { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/io.rs b/core/node/vm_runner/src/io.rs index 2e118f6cfd13..6d758f816f8a 100644 --- a/core/node/vm_runner/src/io.rs +++ b/core/node/vm_runner/src/io.rs @@ -1,4 +1,4 @@ -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use zksync_dal::{Connection, Core}; @@ -31,8 +31,9 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { conn: &mut Connection<'_, Core>, ) -> anyhow::Result; - /// Marks the specified batch as being in progress. Must be called before a batch can be marked - /// as completed. + /// Marks the specified batch as being in progress. Will be called at least once before a batch can be marked + /// as completed; can be called multiple times in case of a crash. The order in which this method is called + /// is not specified; i.e., it is **not** guaranteed to be called sequentially. /// /// # Errors /// @@ -44,7 +45,8 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { ) -> anyhow::Result<()>; /// Marks the specified batch as the latest completed batch. All earlier batches are considered - /// to be completed too. No guarantees about later batches. + /// to be completed too. No guarantees about later batches. This method is guaranteed to be called + /// with monotonically increasing batch numbers. /// /// # Errors /// @@ -55,3 +57,44 @@ pub trait VmRunnerIo: Debug + Send + Sync + 'static { l1_batch_number: L1BatchNumber, ) -> anyhow::Result<()>; } + +#[async_trait] +impl VmRunnerIo for Arc { + fn name(&self) -> &'static str { + (**self).name() + } + + async fn latest_processed_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + (**self).latest_processed_batch(conn).await + } + + async fn last_ready_to_be_loaded_batch( + &self, + conn: &mut Connection<'_, Core>, + ) -> anyhow::Result { + (**self).last_ready_to_be_loaded_batch(conn).await + } + + async fn mark_l1_batch_as_processing( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + (**self) + .mark_l1_batch_as_processing(conn, l1_batch_number) + .await + } + + async fn mark_l1_batch_as_completed( + &self, + conn: &mut Connection<'_, Core>, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result<()> { + (**self) + .mark_l1_batch_as_completed(conn, l1_batch_number) + .await + } +} diff --git a/core/node/vm_runner/src/metrics.rs b/core/node/vm_runner/src/metrics.rs index 4252ad5f0d4f..cc588fd02630 100644 --- a/core/node/vm_runner/src/metrics.rs +++ b/core/node/vm_runner/src/metrics.rs @@ -2,7 +2,28 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics}; +use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Gauge, Histogram, Metrics, Unit}; +use zksync_state::OwnedStorage; + +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] +#[metrics(label = "storage", rename_all = "snake_case")] +pub(super) enum StorageKind { + Postgres, + Snapshot, + Rocksdb, + Unknown, +} + +impl StorageKind { + pub fn new(storage: &OwnedStorage) -> Self { + match storage { + OwnedStorage::Rocksdb(_) | OwnedStorage::RocksdbWithMemory(_) => Self::Rocksdb, + OwnedStorage::Postgres(_) => Self::Postgres, + OwnedStorage::Snapshot(_) => Self::Snapshot, + OwnedStorage::Boxed(_) => Self::Unknown, + } + } +} #[derive(Debug, Metrics)] #[metrics(prefix = "vm_runner")] @@ 
-16,6 +37,9 @@ pub(super) struct VmRunnerMetrics { /// Total latency of loading an L1 batch (RocksDB mode only). #[metrics(buckets = Buckets::LATENCIES)] pub storage_load_time: Histogram, + /// Latency of loading data and storage for a batch, grouped by the storage kind. + #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] + pub data_and_storage_latency: Family>, /// Total latency of running VM on an L1 batch. #[metrics(buckets = Buckets::LATENCIES)] pub run_vm_time: Histogram, diff --git a/core/node/vm_runner/src/output_handler.rs b/core/node/vm_runner/src/output_handler.rs index 25eae5e36845..7a8d1e41e529 100644 --- a/core/node/vm_runner/src/output_handler.rs +++ b/core/node/vm_runner/src/output_handler.rs @@ -61,7 +61,7 @@ pub trait OutputHandler: fmt::Debug + Send { /// simultaneously. Implementing this trait signifies that this property is held for the data the /// implementation is responsible for. #[async_trait] -pub trait OutputHandlerFactory: fmt::Debug + Send { +pub trait OutputHandlerFactory: fmt::Debug + Send + Sync { /// Creates a [`StateKeeperOutputHandler`] implementation for the provided L1 batch. Only /// supposed to be used for the L1 batch data it was created against. Using it for anything else /// will lead to errors. @@ -70,7 +70,7 @@ pub trait OutputHandlerFactory: fmt::Debug + Send { /// /// Propagates DB errors. async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result>; @@ -139,7 +139,7 @@ impl OutputHandlerFactory for ConcurrentOutputHandlerFactory { async fn create_handler( - &mut self, + &self, system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index e2a678ccdce4..4f7ac1f97284 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -1,20 +1,26 @@ -use std::{sync::Arc, time::Duration}; +use std::{ + sync::Arc, + time::{Duration, Instant}, +}; use anyhow::Context; -use tokio::{sync::watch, task::JoinHandle}; +use tokio::{ + sync::{watch, Mutex}, + task::JoinHandle, +}; use zksync_dal::{ConnectionPool, Core}; use zksync_state::OwnedStorage; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber}; -use zksync_vm_interface::{ - executor::{BatchExecutor, BatchExecutorFactory}, - L2BlockEnv, -}; +use zksync_types::L1BatchNumber; +use zksync_vm_interface::{executor::BatchExecutorFactory, L2BlockEnv}; use crate::{ - metrics::METRICS, output_handler::OutputHandler, storage::StorageLoader, L1BatchOutput, - L2BlockOutput, OutputHandlerFactory, VmRunnerIo, + metrics::{StorageKind, METRICS}, + storage::StorageLoader, + L1BatchOutput, L2BlockOutput, OutputHandlerFactory, VmRunnerIo, }; +const SLEEP_INTERVAL: Duration = Duration::from_millis(50); + /// VM runner represents a logic layer of L1 batch / L2 block processing flow akin to that of state /// keeper. The difference is that VM runner is designed to be run on batches/blocks that have /// already been processed by state keeper but still require some extra handling as regulated by @@ -26,13 +32,13 @@ use crate::{ /// /// You can think of VM runner as a concurrent processor of a continuous stream of newly committed /// batches/blocks. 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct VmRunner {
     pool: ConnectionPool<Core>,
-    io: Box<dyn VmRunnerIo>,
+    io: Arc<dyn VmRunnerIo>,
     loader: Arc<dyn StorageLoader>,
-    output_handler_factory: Box<dyn OutputHandlerFactory>,
-    batch_executor_factory: Box<dyn BatchExecutorFactory<OwnedStorage>>,
+    output_handler_factory: Arc<dyn OutputHandlerFactory>,
+    batch_executor_factory: Arc<Mutex<Box<dyn BatchExecutorFactory<OwnedStorage>>>>,
 }
 
 impl VmRunner {
@@ -44,9 +50,9 @@ impl VmRunner {
     /// an underlying implementation of [`OutputHandlerFactory`].
     pub fn new(
         pool: ConnectionPool<Core>,
-        io: Box<dyn VmRunnerIo>,
+        io: Arc<dyn VmRunnerIo>,
         loader: Arc<dyn StorageLoader>,
-        output_handler_factory: Box<dyn OutputHandlerFactory>,
+        output_handler_factory: Arc<dyn OutputHandlerFactory>,
         batch_executor_factory: Box<dyn BatchExecutorFactory<OwnedStorage>>,
     ) -> Self {
         Self {
@@ -54,17 +60,42 @@ impl VmRunner {
             io,
             loader,
             output_handler_factory,
-            batch_executor_factory,
+            batch_executor_factory: Arc::new(Mutex::new(batch_executor_factory)),
         }
     }
 
-    async fn process_batch(
-        mut batch_executor: Box<dyn BatchExecutor<OwnedStorage>>,
-        l2_blocks: Vec<L2BlockExecutionData>,
-        mut output_handler: Box<dyn OutputHandler>,
-    ) -> anyhow::Result<()> {
+    async fn process_batch(self, number: L1BatchNumber) -> anyhow::Result<()> {
+        let stage_started_at = Instant::now();
+        let (batch_data, storage) = loop {
+            match self.loader.load_batch(number).await? {
+                Some(data_and_storage) => break data_and_storage,
+                None => {
+                    // Next batch has not been loaded yet
+                    tokio::time::sleep(SLEEP_INTERVAL).await;
+                }
+            }
+        };
+        let kind = StorageKind::new(&storage);
+        METRICS.data_and_storage_latency[&kind].observe(stage_started_at.elapsed());
+
+        let mut batch_executor = self.batch_executor_factory.lock().await.init_batch(
+            storage,
+            batch_data.l1_batch_env.clone(),
+            batch_data.system_env.clone(),
+        );
+        let mut output_handler = self
+            .output_handler_factory
+            .create_handler(batch_data.system_env, batch_data.l1_batch_env)
+            .await?;
+        self.io
+            .mark_l1_batch_as_processing(
+                &mut self.pool.connection_tagged("vm_runner").await?,
+                number,
+            )
+            .await?;
+
         let latency = METRICS.run_vm_time.start();
-        for (i, l2_block) in l2_blocks.into_iter().enumerate() {
+        for (i, l2_block) in batch_data.l2_blocks.into_iter().enumerate() {
             let block_env = L2BlockEnv::from_l2_block_data(&l2_block);
             if i > 0 {
                 // First L2 block in every batch is already preloaded
@@ -112,14 +143,12 @@ impl VmRunner {
     /// Consumes VM runner to execute a loop that continuously pulls data from [`VmRunnerIo`] and
     /// processes it.
-    pub async fn run(mut self, stop_receiver: &watch::Receiver<bool>) -> anyhow::Result<()> {
-        const SLEEP_INTERVAL: Duration = Duration::from_millis(50);
-
+    pub async fn run(self, stop_receiver: &watch::Receiver<bool>) -> anyhow::Result<()> {
         // Join handles for asynchronous tasks that are being run in the background
         let mut task_handles: Vec<(L1BatchNumber, JoinHandle<anyhow::Result<()>>)> = Vec::new();
         let mut next_batch = self
             .io
-            .latest_processed_batch(&mut self.pool.connection().await?)
+            .latest_processed_batch(&mut self.pool.connection_tagged("vm_runner").await?)
             .await?
             + 1;
         loop {
@@ -148,7 +177,7 @@ impl VmRunner {
             let last_ready_batch = self
                 .io
-                .last_ready_to_be_loaded_batch(&mut self.pool.connection().await?)
+                .last_ready_to_be_loaded_batch(&mut self.pool.connection_tagged("vm_runner").await?)
                 .await?;
             METRICS.last_ready_batch.set(last_ready_batch.0.into());
             if next_batch > last_ready_batch {
@@ -156,31 +185,8 @@ impl VmRunner {
                 tokio::time::sleep(SLEEP_INTERVAL).await;
                 continue;
             }
-            let Some((batch_data, storage)) = self.loader.load_batch(next_batch).await?
else { - // Next batch has not been loaded yet - tokio::time::sleep(SLEEP_INTERVAL).await; - continue; - }; - let batch_executor = self.batch_executor_factory.init_batch( - storage, - batch_data.l1_batch_env.clone(), - batch_data.system_env.clone(), - ); - let output_handler = self - .output_handler_factory - .create_handler(batch_data.system_env, batch_data.l1_batch_env) - .await?; - - self.io - .mark_l1_batch_as_processing(&mut self.pool.connection().await?, next_batch) - .await?; - let handle = tokio::task::spawn(Self::process_batch( - batch_executor, - batch_data.l2_blocks, - output_handler, - )); + let handle = tokio::spawn(self.clone().process_batch(next_batch)); task_handles.push((next_batch, handle)); - next_batch += 1; } } diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index baee426007c5..cd746e4e1d97 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -49,7 +49,7 @@ pub(crate) struct PostgresLoader { impl PostgresLoader { pub async fn new(pool: ConnectionPool, chain_id: L2ChainId) -> anyhow::Result { let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - let mut conn = pool.connection().await?; + let mut conn = pool.connection_tagged("vm_runner").await?; l1_batch_params_provider.initialize(&mut conn).await?; Ok(Self { pool, @@ -72,7 +72,7 @@ impl StorageLoader for PostgresLoader { &self, l1_batch_number: L1BatchNumber, ) -> anyhow::Result> { - let mut conn = self.pool.connection().await?; + let mut conn = self.pool.connection_tagged("vm_runner").await?; let Some(data) = load_batch_execute_data( &mut conn, l1_batch_number, @@ -86,7 +86,7 @@ impl StorageLoader for PostgresLoader { if let Some(snapshot) = OwnedStorage::snapshot(&mut conn, l1_batch_number).await? 
{ let postgres = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; - let storage = snapshot.with_fallback(postgres, self.shadow_snapshots); + let storage = snapshot.with_fallback(postgres.into(), self.shadow_snapshots); let storage = OwnedStorage::from(storage); return Ok(Some((data, storage))); } @@ -94,7 +94,7 @@ impl StorageLoader for PostgresLoader { tracing::info!( "Incomplete data to create storage snapshot for batch; will use sequential storage" ); - let conn = self.pool.connection().await?; + let conn = self.pool.connection_tagged("vm_runner").await?; let storage = OwnedStorage::postgres(conn, l1_batch_number - 1).await?; Ok(Some((data, storage.into()))) } diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 530016408140..9fe9e99e92c8 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -39,7 +39,7 @@ struct IoMock { } #[async_trait] -impl VmRunnerIo for Arc> { +impl VmRunnerIo for RwLock { fn name(&self) -> &'static str { "io_mock" } @@ -153,7 +153,7 @@ struct TestOutputFactory { #[async_trait] impl OutputHandlerFactory for TestOutputFactory { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs index fec3fd2ba60a..115410ce8fbd 100644 --- a/core/node/vm_runner/src/tests/process.rs +++ b/core/node/vm_runner/src/tests/process.rs @@ -57,9 +57,9 @@ async fn process_batches((batch_count, window): (u32, u32)) -> anyhow::Result<() let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( connection_pool, - Box::new(io.clone()), + io.clone(), storage, - Box::new(output_factory), + Arc::new(output_factory), Box::new(batch_executor), ); tokio::task::spawn(async move { vm_runner.run(&stop_receiver).await.unwrap() }); diff --git a/core/node/vm_runner/src/tests/storage_writer.rs b/core/node/vm_runner/src/tests/storage_writer.rs index 76d0867125a8..c377cf95b5a8 100644 --- a/core/node/vm_runner/src/tests/storage_writer.rs +++ b/core/node/vm_runner/src/tests/storage_writer.rs @@ -57,6 +57,8 @@ impl VmRunnerIo for StorageWriterIo { l1_batch_number: L1BatchNumber, ) -> anyhow::Result<()> { assert_eq!(l1_batch_number, self.batch() + 1); + // ^ The assertion works because of `last_ready_to_be_loaded_batch()` implementation; it wouldn't hold if we allowed + // to process multiple batches concurrently. 
Ok(()) } @@ -147,7 +149,7 @@ impl OutputHandler for StorageWriterIo { #[async_trait] impl OutputHandlerFactory for StorageWriterIo { async fn create_handler( - &mut self, + &self, _system_env: SystemEnv, l1_batch_env: L1BatchEnv, ) -> anyhow::Result> { @@ -167,7 +169,7 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protec .unwrap() .expect("No L1 batches in storage"); drop(conn); - let io = Box::new(StorageWriterIo { + let io = Arc::new(StorageWriterIo { last_processed_batch: Arc::new(watch::channel(L1BatchNumber(0)).0), last_processed_block: L2BlockNumber(0), pool: pool.clone(), @@ -240,9 +242,9 @@ async fn storage_writer_works(insert_protective_reads: bool) { let batch_executor = MainBatchExecutorFactory::new(false, false); let vm_runner = VmRunner::new( pool, - Box::new(io.clone()), + io.clone(), loader, - Box::new(output_factory), + Arc::new(output_factory), Box::new(batch_executor), ); From f985e41f10f61c524f5ce47d27e75f3c626ae2fc Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Mon, 16 Sep 2024 12:13:29 +0200 Subject: [PATCH 075/116] fix(zk_toolbox): increase confirmations in testing (#2878) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Increase confirmations in testing. ## Why ❔ Should make CI workflows less flaky. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- infrastructure/zk/src/docker.ts | 6 ++-- zk_toolbox/crates/common/src/ethereum.rs | 45 +++++++++++------------- 2 files changed, 23 insertions(+), 28 deletions(-) diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index a100d1231da6..035061a8ed0d 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -1,4 +1,4 @@ -import {Command} from 'commander'; +import { Command } from 'commander'; import * as utils from 'utils'; const IMAGES = [ @@ -31,7 +31,7 @@ async function dockerCommand( dockerOrg: string = 'matterlabs' ) { // Generating all tags for containers. We need 2 tags here: SHA and SHA+TS - const {stdout: COMMIT_SHORT_SHA}: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); + const { stdout: COMMIT_SHORT_SHA }: { stdout: string } = await utils.exec('git rev-parse --short HEAD'); // COMMIT_SHORT_SHA returns with newline, so we need to trim it const imageTagShaTS: string = process.env.IMAGE_TAG_SUFFIX ? 
process.env.IMAGE_TAG_SUFFIX @@ -126,7 +126,7 @@ async function _build(image: string, tagList: string[], dockerOrg: string, platf } buildArgs += extraArgs; - console.log("Build args: ", buildArgs); + console.log('Build args: ', buildArgs); const buildCommand = `DOCKER_BUILDKIT=1 docker buildx build ${tagsToBuild}` + diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index 93cc524568c3..4f000ed0fd53 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -10,7 +10,7 @@ use ethers::{ }; use types::TokenInfo; -use crate::{logger, wallets::Wallet}; +use crate::wallets::Wallet; pub fn create_ethers_client( private_key: H256, @@ -89,35 +89,30 @@ pub async fn mint_token( chain_id: u64, amount: u128, ) -> anyhow::Result<()> { - let client = Arc::new(create_ethers_client( - main_wallet.private_key.unwrap(), - l1_rpc, - Some(chain_id), - )?); + let client = Arc::new( + create_ethers_client(main_wallet.private_key.unwrap(), l1_rpc, Some(chain_id))? + .nonce_manager(main_wallet.address), + ); let contract = TokenContract::new(token_address, client); - // contract + + let mut pending_calls = vec![]; for address in addresses { - if let Err(err) = mint(&contract, address, amount).await { - logger::warn(format!("Failed to mint {err}")) - } + pending_calls.push(contract.mint(address, amount.into())); } - Ok(()) -} + let mut pending_txs = vec![]; + for call in &pending_calls { + pending_txs.push( + call.send() + .await? + // It's safe to set such low number of confirmations and low interval for localhost + .confirmations(3) + .interval(Duration::from_millis(30)), + ); + } + + futures::future::join_all(pending_txs).await; -async fn mint( - contract: &TokenContract, - address: Address, - amount: u128, -) -> anyhow::Result<()> { - contract - .mint(address, amount.into()) - .send() - .await? - // It's safe to set such low number of confirmations and low interval for localhost - .confirmations(1) - .interval(Duration::from_millis(30)) - .await?; Ok(()) } From 438c8209ea7cb669e35fc7c24c3b2d66909231f3 Mon Sep 17 00:00:00 2001 From: Igor Aleksanov Date: Mon, 16 Sep 2024 16:10:55 +0400 Subject: [PATCH 076/116] feat(zk_toolbox): Verbose version message (#2884) Modifies the version message to include stuff like git commit, branch, build timestamp, and submodules versions (non-recursive). 
Sample output: ``` $ zki --version zk_inception v0.1.0-c03229138 Branch: popzxc-version-with-git-commit Submodules: - contracts: 3a1b5d4b94ffb00f03d436a7db7e48589eb74d39 Build timestamp: 2024-09-16 12:07:09 ``` --- zk_toolbox/Cargo.lock | 8 ++ zk_toolbox/Cargo.toml | 3 + zk_toolbox/crates/common/Cargo.toml | 1 + zk_toolbox/crates/common/src/lib.rs | 1 + zk_toolbox/crates/common/src/version.rs | 24 ++++++ .../crates/git_version_macro/Cargo.toml | 17 ++++ .../crates/git_version_macro/src/lib.rs | 81 +++++++++++++++++++ zk_toolbox/crates/zk_inception/src/main.rs | 11 ++- zk_toolbox/crates/zk_supervisor/src/main.rs | 12 ++- 9 files changed, 153 insertions(+), 5 deletions(-) create mode 100644 zk_toolbox/crates/common/src/version.rs create mode 100644 zk_toolbox/crates/git_version_macro/Cargo.toml create mode 100644 zk_toolbox/crates/git_version_macro/src/lib.rs diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 291c24dbf846..91c379d6443e 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -684,6 +684,7 @@ dependencies = [ "console", "ethers", "futures", + "git_version_macro", "once_cell", "serde", "serde_json", @@ -1962,6 +1963,13 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +[[package]] +name = "git_version_macro" +version = "0.1.0" +dependencies = [ + "chrono", +] + [[package]] name = "glob" version = "0.3.1" diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 33309872ea3b..875db3794d41 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -5,6 +5,7 @@ members = [ "crates/types", "crates/zk_inception", "crates/zk_supervisor", + "crates/git_version_macro", ] resolver = "2" @@ -25,6 +26,7 @@ keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] common = { path = "crates/common" } config = { path = "crates/config" } types = { path = "crates/types" } +git_version_macro = { path = "crates/git_version_macro" } # ZkSync deps zksync_config = { path = "../core/lib/config" } @@ -37,6 +39,7 @@ zksync_protobuf = "=0.1.1" # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } +chrono = "0.4" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" diff --git a/zk_toolbox/crates/common/Cargo.toml b/zk_toolbox/crates/common/Cargo.toml index 1f6fb6fd9fe1..5fdf481bea6f 100644 --- a/zk_toolbox/crates/common/Cargo.toml +++ b/zk_toolbox/crates/common/Cargo.toml @@ -29,3 +29,4 @@ url.workspace = true xshell.workspace = true thiserror.workspace = true strum.workspace = true +git_version_macro.workspace = true diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index 7be4af740700..b4495d555ec0 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -12,6 +12,7 @@ pub mod files; pub mod forge; pub mod git; pub mod server; +pub mod version; pub mod wallets; pub use prerequisites::{ diff --git a/zk_toolbox/crates/common/src/version.rs b/zk_toolbox/crates/common/src/version.rs new file mode 100644 index 000000000000..43be7a07b7ee --- /dev/null +++ b/zk_toolbox/crates/common/src/version.rs @@ -0,0 +1,24 @@ +const GIT_VERSION: &str = git_version_macro::build_git_revision!(); +const GIT_BRANCH: &str = git_version_macro::build_git_branch!(); +const GIT_SUBMODULES: &[(&str, &str)] = git_version_macro::build_git_submodules!(); +const BUILD_TIMESTAMP: &str = git_version_macro::build_timestamp!(); + +/// 
Returns a multi-line version message that includes: +/// - provided crate version +/// - git revision +/// - git branch +/// - git submodules +/// - build timestamp +pub fn version_message(crate_version: &str) -> String { + let mut version = format!("v{}-{}\n", crate_version, GIT_VERSION); + version.push_str(&format!("Branch: {}\n", GIT_BRANCH)); + #[allow(clippy::const_is_empty)] // Proc-macro generated. + if !GIT_SUBMODULES.is_empty() { + version.push_str("Submodules:\n"); + for (name, rev) in GIT_SUBMODULES { + version.push_str(&format!(" - {}: {}\n", name, rev)); + } + } + version.push_str(&format!("Build timestamp: {}\n", BUILD_TIMESTAMP)); + version +} diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zk_toolbox/crates/git_version_macro/Cargo.toml new file mode 100644 index 000000000000..eb70b450a4cf --- /dev/null +++ b/zk_toolbox/crates/git_version_macro/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "git_version_macro" +edition = "2021" +description = "Procedural macro to generate metainformation about build in compile time" +version.workspace = true +homepage.workspace = true +license.workspace = true +authors.workspace = true +exclude.workspace = true +repository.workspace = true +keywords.workspace = true + +[lib] +proc-macro = true + +[dependencies] +chrono.workspace = true diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zk_toolbox/crates/git_version_macro/src/lib.rs new file mode 100644 index 000000000000..34b83efce195 --- /dev/null +++ b/zk_toolbox/crates/git_version_macro/src/lib.rs @@ -0,0 +1,81 @@ +extern crate proc_macro; +use std::{process::Command, str::FromStr}; + +use proc_macro::TokenStream; + +/// Outputs the current date and time as a string literal. +/// Can be used to include the build timestamp in the binary. +#[proc_macro] +pub fn build_timestamp(_item: TokenStream) -> TokenStream { + let now = chrono::Local::now().format("%Y-%m-%d %H:%M:%S").to_string(); + encode_as_str(&now) +} + +/// Outputs the current git branch as a string literal. +#[proc_macro] +pub fn build_git_branch(_item: TokenStream) -> TokenStream { + let out = run_cmd("git", &["rev-parse", "--abbrev-ref", "HEAD"]); + encode_as_str(&out) +} + +/// Outputs the current git commit hash as a string literal. +#[proc_macro] +pub fn build_git_revision(_item: TokenStream) -> TokenStream { + let out = run_cmd("git", &["rev-parse", "--short", "HEAD"]); + encode_as_str(&out) +} + +/// Creates a slice of `&[(&str, &str)]` tuples that correspond to +/// the submodule name -> revision. +/// Results in an empty list if there are no submodules or if +/// the command fails. +#[proc_macro] +pub fn build_git_submodules(_item: TokenStream) -> TokenStream { + let Some(out) = run_cmd_opt("git", &["submodule", "status"]) else { + return TokenStream::from_str("&[]").unwrap(); + }; + let submodules = out + .lines() + .filter_map(|line| { + let parts: Vec<&str> = line.split_whitespace().collect(); + // Index 0 is commit hash, index 1 is the path to the folder, and there + // may be some metainformation after that. 
+ if parts.len() >= 2 { + let folder_name = parts[1].split('/').last().unwrap_or(parts[1]); + Some((folder_name, parts[0])) + } else { + None + } + }) + .collect::>(); + let submodules = submodules + .iter() + .map(|(name, rev)| format!("(\"{}\", \"{}\")", name, rev)) + .collect::>() + .join(", "); + TokenStream::from_str(format!("&[{}]", submodules).as_str()) + .unwrap_or_else(|_| panic!("Unable to encode submodules: {}", submodules)) +} + +/// Tries to run the command, only returns `Some` if the command +/// succeeded and the output was valid utf8. +fn run_cmd(cmd: &str, args: &[&str]) -> String { + run_cmd_opt(cmd, args).unwrap_or("unknown".to_string()) +} + +fn run_cmd_opt(cmd: &str, args: &[&str]) -> Option { + let output = Command::new(cmd).args(args).output().ok()?; + if output.status.success() { + String::from_utf8(output.stdout) + .ok() + .map(|s| s.trim().to_string()) + } else { + None + } +} + +/// Encodes string as a literal. +fn encode_as_str(s: &str) -> TokenStream { + TokenStream::from_str(format!("\"{}\"", s).as_str()) + .unwrap_or_else(|_| panic!("Unable to encode string: {}", s)) +} diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs index f6f7d83dede6..f1ca348df386 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zk_toolbox/crates/zk_inception/src/main.rs @@ -8,6 +8,7 @@ use common::{ config::{global_config, init_global_config, GlobalConfig}, error::log_error, init_prompt_theme, logger, + version::version_message, }; use config::EcosystemConfig; use xshell::Shell; @@ -26,7 +27,10 @@ mod messages; mod utils; #[derive(Parser, Debug)] -#[command(version, about)] +#[command( + version = version_message(env!("CARGO_PKG_VERSION")), + about +)] struct Inception { #[command(subcommand)] command: InceptionSubcommands, @@ -86,13 +90,16 @@ struct InceptionGlobalArgs { async fn main() -> anyhow::Result<()> { human_panic::setup_panic!(); + // We must parse arguments before printing the intro, because some autogenerated + // Clap commands (like `--version` would look odd otherwise). + let inception_args = Inception::parse(); + init_prompt_theme(); logger::new_empty_line(); logger::intro(); let shell = Shell::new().unwrap(); - let inception_args = Inception::parse(); init_global_config_inner(&shell, &inception_args.global)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 32aefa7fcad9..67a5179f4ecf 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -8,6 +8,7 @@ use common::{ config::{global_config, init_global_config, GlobalConfig}, error::log_error, init_prompt_theme, logger, + version::version_message, }; use config::EcosystemConfig; use messages::{ @@ -25,7 +26,10 @@ mod defaults; mod messages; #[derive(Parser, Debug)] -#[command(version, about)] +#[command( + version = version_message(env!("CARGO_PKG_VERSION")), + about +)] struct Supervisor { #[command(subcommand)] command: SupervisorSubcommands, @@ -73,14 +77,16 @@ struct SupervisorGlobalArgs { async fn main() -> anyhow::Result<()> { human_panic::setup_panic!(); + // We must parse arguments before printing the intro, because some autogenerated + // Clap commands (like `--version` would look odd otherwise). 
+ let args = Supervisor::parse(); + init_prompt_theme(); logger::new_empty_line(); logger::intro(); let shell = Shell::new().unwrap(); - let args = Supervisor::parse(); - init_global_config_inner(&shell, &args.global)?; if !global_config().ignore_prerequisites { From f095b4a3223222ac712de53592fe1e68f766600f Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Mon, 16 Sep 2024 15:15:50 +0300 Subject: [PATCH 077/116] feat(en): Resume incomplete snapshot in snapshot creator in more cases (#2886) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Resumes an incomplete snapshot in the snapshot creator if the creator config doesn't specify an L1 batch. ## Why ❔ This effectively reverts the relevant changes from https://github.com/matter-labs/zksync-era/pull/2256. It makes the snapshot creator resilient by default without additional setup, at the cost of parallel creator jobs working incorrectly (unless they all specify L1 batches). ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 1 + core/bin/snapshots_creator/Cargo.toml | 1 + core/bin/snapshots_creator/src/creator.rs | 42 +++++----- core/bin/snapshots_creator/src/tests.rs | 99 ++++++++++++++++++----- 4 files changed, 101 insertions(+), 42 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c47e5b77e391..38300269e80a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -6851,6 +6851,7 @@ dependencies = [ "futures 0.3.30", "rand 0.8.5", "structopt", + "test-casing", "tokio", "tracing", "vise", diff --git a/core/bin/snapshots_creator/Cargo.toml b/core/bin/snapshots_creator/Cargo.toml index 530b9635cd4f..5a36c646e88e 100644 --- a/core/bin/snapshots_creator/Cargo.toml +++ b/core/bin/snapshots_creator/Cargo.toml @@ -29,3 +29,4 @@ futures.workspace = true [dev-dependencies] rand.workspace = true +test-casing.workspace = true diff --git a/core/bin/snapshots_creator/src/creator.rs b/core/bin/snapshots_creator/src/creator.rs index 18212a7d2055..29150cd6b698 100644 --- a/core/bin/snapshots_creator/src/creator.rs +++ b/core/bin/snapshots_creator/src/creator.rs @@ -291,25 +291,38 @@ impl SnapshotCreator { .get_sealed_l1_batch_number() .await?; let sealed_l1_batch_number = sealed_l1_batch_number.context("No L1 batches in Postgres")?; - let requested_l1_batch_number = if let Some(l1_batch_number) = config.l1_batch_number { + let (requested_l1_batch_number, existing_snapshot) = if let Some(l1_batch_number) = + config.l1_batch_number + { anyhow::ensure!( l1_batch_number <= sealed_l1_batch_number, "Requested a snapshot for L1 batch #{l1_batch_number} that doesn't exist in Postgres (latest L1 batch: {sealed_l1_batch_number})" ); - l1_batch_number + + let existing_snapshot = master_conn + .snapshots_dal() + .get_snapshot_metadata(l1_batch_number) + .await?; + (l1_batch_number, existing_snapshot) } else { // We subtract 1 so that after restore, EN node has at least one L1 batch to fetch. 
anyhow::ensure!( sealed_l1_batch_number != L1BatchNumber(0), "Cannot create snapshot when only the genesis L1 batch is present in Postgres" ); - sealed_l1_batch_number - 1 - }; + let requested_l1_batch_number = sealed_l1_batch_number - 1; - let existing_snapshot = master_conn - .snapshots_dal() - .get_snapshot_metadata(requested_l1_batch_number) - .await?; + // Continue creating a pending snapshot if it exists, even if it doesn't correspond to the latest L1 batch. + // OTOH, a completed snapshot does not matter, unless it corresponds to `requested_l1_batch_number` (in which case it doesn't need to be created again). + let existing_snapshot = master_conn + .snapshots_dal() + .get_newest_snapshot_metadata() + .await? + .filter(|snapshot| { + !snapshot.is_complete() || snapshot.l1_batch_number == requested_l1_batch_number + }); + (requested_l1_batch_number, existing_snapshot) + }; drop(master_conn); match existing_snapshot { @@ -317,18 +330,7 @@ impl SnapshotCreator { tracing::info!("Snapshot for the requested L1 batch is complete: {snapshot:?}"); Ok(None) } - Some(snapshot) if config.l1_batch_number.is_some() => { - Ok(Some(SnapshotProgress::from_existing_snapshot(&snapshot))) - } - Some(snapshot) => { - // Unless creating a snapshot for a specific L1 batch is requested, we never continue an existing snapshot, even if it's incomplete. - // This it to make running multiple snapshot creator instances in parallel easier to reason about. - tracing::warn!( - "Snapshot at expected L1 batch #{requested_l1_batch_number} exists, but is incomplete: {snapshot:?}. If you need to resume creating it, \ - specify the L1 batch number in the snapshot creator config" - ); - Ok(None) - } + Some(snapshot) => Ok(Some(SnapshotProgress::from_existing_snapshot(&snapshot))), None => { Self::initialize_snapshot_progress( config, diff --git a/core/bin/snapshots_creator/src/tests.rs b/core/bin/snapshots_creator/src/tests.rs index 990dd672975a..a440d836b4c9 100644 --- a/core/bin/snapshots_creator/src/tests.rs +++ b/core/bin/snapshots_creator/src/tests.rs @@ -10,6 +10,7 @@ use std::{ }; use rand::{thread_rng, Rng}; +use test_casing::test_casing; use zksync_config::SnapshotsCreatorConfig; use zksync_dal::{Connection, CoreDal}; use zksync_object_store::{MockObjectStore, ObjectStore}; @@ -64,6 +65,15 @@ impl HandleEvent for TestEventListener { } } +#[derive(Debug)] +struct UnreachableEventListener; + +impl HandleEvent for UnreachableEventListener { + fn on_chunk_started(&self) -> TestBehavior { + unreachable!("should not be reached"); + } +} + impl SnapshotCreator { fn for_tests(blob_store: Arc, pool: ConnectionPool) -> Self { Self { @@ -80,6 +90,13 @@ impl SnapshotCreator { ..self } } + + fn panic_on_chunk_start(self) -> Self { + Self { + event_listener: Box::new(UnreachableEventListener), + ..self + } + } } #[derive(Debug)] @@ -431,8 +448,9 @@ async fn persisting_snapshot_logs_for_v0_snapshot() { assert_eq!(actual_logs, expected_outputs.storage_logs); } +#[test_casing(2, [false, true])] #[tokio::test] -async fn recovery_workflow() { +async fn recovery_workflow(specify_batch_after_recovery: bool) { let pool = ConnectionPool::::test_pool().await; let mut rng = thread_rng(); let object_store = MockObjectStore::arc(); @@ -462,29 +480,9 @@ async fn recovery_workflow() { let actual_deps: HashSet<_> = factory_deps.into_iter().collect(); assert_eq!(actual_deps, expected_outputs.deps); - // Check that the creator does nothing unless it's requested to create a new snapshot. 
-    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
-        .stop_after_chunk_count(2)
-        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
-        .await
-        .unwrap();
-    let snapshot_metadata = conn
-        .snapshots_dal()
-        .get_snapshot_metadata(snapshot_l1_batch_number)
-        .await
-        .unwrap()
-        .expect("No snapshot metadata");
-    assert!(
-        snapshot_metadata
-            .storage_logs_filepaths
-            .iter()
-            .all(Option::is_none),
-        "{snapshot_metadata:?}"
-    );
-
     // Process 2 storage log chunks, then stop.
     let recovery_config = SnapshotsCreatorConfig {
-        l1_batch_number: Some(snapshot_l1_batch_number),
+        l1_batch_number: specify_batch_after_recovery.then_some(snapshot_l1_batch_number),
         ..SEQUENTIAL_TEST_CONFIG
     };
     SnapshotCreator::for_tests(object_store.clone(), pool.clone())
@@ -510,11 +508,68 @@ async fn recovery_workflow() {
 
     // Process the remaining chunks.
     SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .run(recovery_config.clone(), MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
+
+    assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await;
+
+    // Check that the snapshot is not created anew after it is completed.
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .panic_on_chunk_start()
         .run(recovery_config, MIN_CHUNK_COUNT)
         .await
         .unwrap();
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
+}
+
+#[tokio::test]
+async fn recovery_workflow_with_new_l1_batch() {
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut rng = thread_rng();
+    let object_store = MockObjectStore::arc();
+    let mut conn = pool.connection().await.unwrap();
+    let expected_outputs = prepare_postgres(&mut rng, &mut conn, 10).await;
+
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .stop_after_chunk_count(2)
+        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
+
+    let snapshot_l1_batch_number = L1BatchNumber(8);
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(!snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
+
+    let new_logs = gen_storage_logs(&mut thread_rng(), 50);
+    create_l1_batch(&mut conn, snapshot_l1_batch_number + 2, &new_logs).await;
+
+    // The old snapshot should be completed.
+    SnapshotCreator::for_tests(object_store.clone(), pool.clone())
+        .run(SEQUENTIAL_TEST_CONFIG, MIN_CHUNK_COUNT)
+        .await
+        .unwrap();
 
     assert_storage_logs(&*object_store, snapshot_l1_batch_number, &expected_outputs).await;
+
+    let snapshot_metadata = conn
+        .snapshots_dal()
+        .get_snapshot_metadata(snapshot_l1_batch_number)
+        .await
+        .unwrap()
+        .expect("No snapshot metadata");
+    assert!(snapshot_metadata.is_complete(), "{snapshot_metadata:#?}");
 }
 
 #[tokio::test]

From af614fe28554db61ec9a79ba423b926dd5b070fb Mon Sep 17 00:00:00 2001
From: Artem Makhortov <13339874+artmakh@users.noreply.github.com>
Date: Mon, 16 Sep 2024 20:07:09 +0700
Subject: [PATCH 078/116] feat(build-base): Add sccache to build-base Dockerfile (#2889)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Adding installation of sccache to the build-base Dockerfile

## Why ❔

To speed up Docker builds

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog
  entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

---
 docker/build-base/Dockerfile | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docker/build-base/Dockerfile b/docker/build-base/Dockerfile
index be3c6ddb452e..16ea566cef70 100644
--- a/docker/build-base/Dockerfile
+++ b/docker/build-base/Dockerfile
@@ -13,3 +13,4 @@ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
     rustup default nightly-2024-08-01
 
 RUN cargo install sqlx-cli --version 0.8.0
+RUN cargo install sccache --version 0.8.1

From 9d40704bc9ecb5b884f33ca59438a3a724cefb18 Mon Sep 17 00:00:00 2001
From: Igor Aleksanov
Date: Mon, 16 Sep 2024 17:17:07 +0400
Subject: [PATCH 079/116] refactor(selector-generator): Polish the code (#2888)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

- Changes the procedural approach to a more idiomatic Rust design
  (introduces the `Selectors` and `App` structures). `Selectors` is not
  FS-aware; `App` is.
- Introduces stronger typing -- when both selectors and function names are
  `String`, it's easy to mix them up (I did so several times while working
  on this PR).
- Removes the manual selector calculation and uses `ethabi` for that.
- Makes the code async.
- Adds docs for the CLI (visible via the `--help` argument).
- Adds a test for `Selectors`.
- Reduces the number of panics and uses `anyhow` to propagate errors.
- Adds new selectors (I guess I have more contracts compiled locally).

## Why ❔

Maintainability and testability, mostly. With the refactoring, it should be
easier to add new functionality and tests for it.

Also, the runtime has improved.

Before
```
$ time cargo run --bin selector_generator -- contracts etc/selector-generator-data/selectors.json
    Compiling selector_generator v0.1.0 (/home/popzxc/workspace/current/zksync-era/core/bin/selector_generator)
     Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.54s
      Running `target/debug/selector_generator contracts etc/selector-generator-data/selectors.json`
Analyzed 401 files. Added 452 selectors (before: 516 after: 968)
cargo run --bin selector_generator -- contracts   4.48s user 7.02s system 100% cpu 11.486 total
```

After
```
$ time cargo run --bin selector_generator -- contracts etc/selector-generator-data/selectors.json
    Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.20s
     Running `target/debug/selector_generator contracts etc/selector-generator-data/selectors.json`
Analyzed 1087 files. Added 0 selectors (before: 1023 after: 1023)
cargo run --bin selector_generator -- contracts   1.24s user 0.12s system 100% cpu 1.360 total
```

## Next steps

I would love to see a `--check` mode introduced (which verifies that no new
entries would be added) and this tool added to CI, so that we continuously
run it and prevent it from rotting.

@mm-zk leaving it to you.
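For illustration, here is a minimal sketch of what that `--check` mode could
look like. It is only a suggestion: the `check` flag and the `added_count`
helper are hypothetical, while `App::load`, `process_files`, `report`, and
`save` are the methods introduced in this PR.

```rust
use app::App;
use clap::Parser;

pub(crate) mod app;
pub(crate) mod selectors;

#[derive(Debug, Parser)]
struct Cli {
    contracts_dir: String,
    output_file: String,
    /// Hypothetical flag: fail instead of writing the file if processing
    /// would add any new selectors.
    #[arg(long)]
    check: bool,
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let args = Cli::parse();
    let mut app = App::load(args.output_file).await?;
    app.process_files(&args.contracts_dir).await?;
    app.report();
    if args.check {
        // `added_count` would return `selectors.len() - selectors_before`,
        // both of which `App` already tracks for reporting.
        anyhow::ensure!(app.added_count() == 0, "selectors.json is out of date");
    } else {
        app.save().await?;
    }
    Ok(())
}
```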
---
 Cargo.lock                                   |    5 +-
 core/bin/selector_generator/Cargo.toml       |    7 +-
 core/bin/selector_generator/src/app.rs       |  105 ++
 core/bin/selector_generator/src/main.rs      |  116 +-
 core/bin/selector_generator/src/selectors.rs |  118 ++
 etc/selector-generator-data/selectors.json   | 1465 ++++++++++++------
 6 files changed, 1240 insertions(+), 576 deletions(-)
 create mode 100644 core/bin/selector_generator/src/app.rs
 create mode 100644 core/bin/selector_generator/src/selectors.rs

diff --git a/Cargo.lock b/Cargo.lock
index 38300269e80a..2aaf875a2f49 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -6281,11 +6281,14 @@
 name = "selector_generator"
 version = "0.1.0"
 dependencies = [
+ "anyhow",
  "clap 4.4.6",
+ "ethabi",
  "glob",
+ "hex",
  "serde",
  "serde_json",
- "sha3 0.10.8",
+ "tokio",
 ]

diff --git a/core/bin/selector_generator/Cargo.toml b/core/bin/selector_generator/Cargo.toml
index e0b0afe0ae2c..b3425c11b4ec 100644
--- a/core/bin/selector_generator/Cargo.toml
+++ b/core/bin/selector_generator/Cargo.toml
@@ -11,8 +11,11 @@ categories.workspace = true
 publish = false
 
 [dependencies]
+anyhow.workspace = true
 serde = { workspace = true, features = ["derive"] }
 serde_json.workspace = true
-sha3.workspace = true
 glob.workspace = true
-clap = { workspace = true, features = ["derive"] }
\ No newline at end of file
+clap = { workspace = true, features = ["derive"] }
+ethabi.workspace = true
+hex.workspace = true
+tokio = { workspace = true, features = ["full"] }

diff --git a/core/bin/selector_generator/src/app.rs b/core/bin/selector_generator/src/app.rs
new file mode 100644
index 000000000000..425bf9f42826
--- /dev/null
+++ b/core/bin/selector_generator/src/app.rs
@@ -0,0 +1,105 @@
+use std::path::PathBuf;
+
+use anyhow::Context;
+use glob::glob;
+use tokio::io::AsyncWriteExt as _;
+
+use crate::selectors::Selectors;
+
+#[derive(Debug, Default)]
+pub(crate) struct App {
+    /// Selectors file.
+    file_path: PathBuf,
+    /// All the selectors. Initially loaded from the file.
+    /// All the discovered selectors will be merged into it.
+    selectors: Selectors,
+    /// Number of selectors before processing the files.
+    /// Used for reporting.
+    selectors_before: usize,
+    /// Number of files analyzed.
+    /// Used for reporting.
+    analyzed_files: usize,
+}
+
+impl App {
+    /// Loads the selectors from the file, or returns a new instance if the file doesn't exist.
+    pub async fn load(file_path: impl Into<PathBuf>) -> anyhow::Result<Self> {
+        let file_path = file_path.into();
+        // If the file doesn't exist, return the default.
+        if !file_path.exists() {
+            return Ok(Self::default());
+        }
+
+        let file = tokio::fs::read(&file_path)
+            .await
+            .context("Failed to read file")?;
+        let selectors: Selectors =
+            serde_json::from_slice(&file).context("Failed to deserialize file")?;
+        let selectors_before = selectors.len();
+        Ok(Self {
+            file_path,
+            selectors,
+            selectors_before,
+            analyzed_files: 0,
+        })
+    }
+
+    /// Analyzes all the JSON files, looking for 'abi' entries, and then computes the selectors for them.
+    pub async fn process_files(&mut self, directory: &str) -> anyhow::Result<()> {
+        for file_path in Self::load_file_paths(directory) {
+            let Ok(new_selectors) = Selectors::load(&file_path).await.inspect_err(|e| {
+                eprintln!("Error parsing file {file_path:?}: {e:?}");
+            }) else {
+                continue;
+            };
+            self.merge(new_selectors);
+        }
+        Ok(())
+    }
+
+    /// Saves the selectors to the file.
+    pub async fn save(self) -> anyhow::Result<()> {
+        let mut file = tokio::fs::OpenOptions::new()
+            .write(true)
+            .create(true)
+            .truncate(true)
+            .open(self.file_path)
+            .await
+            .context("Failed to open file")?;
+        let json = serde_json::to_string_pretty(&self.selectors)?;
+        file.write_all(json.as_bytes())
+            .await
+            .context("Failed to save file")?;
+        Ok(())
+    }
+
+    /// Merges the new selectors into the current ones.
+    pub fn merge(&mut self, new: Selectors) {
+        self.selectors.merge(new);
+        self.analyzed_files += 1;
+    }
+
+    /// Reports the number of analyzed files and the number of added selectors.
+    pub fn report(&self) {
+        println!(
+            "Analyzed {} files. Added {} selectors (before: {} after: {})",
+            self.analyzed_files,
+            self.selectors.len() - self.selectors_before,
+            self.selectors_before,
+            self.selectors.len()
+        );
+    }
+
+    fn load_file_paths(dir: &str) -> Vec<PathBuf> {
+        glob(&format!("{}/**/*.json", dir))
+            .expect("Failed to read glob pattern")
+            .filter_map(|entry| match entry {
+                Ok(path) => Some(path),
+                Err(e) => {
+                    eprintln!("Error reading file: {:?}", e);
+                    None
+                }
+            })
+            .collect()
+    }
+}
diff --git a/core/bin/selector_generator/src/main.rs b/core/bin/selector_generator/src/main.rs
index ad6180413f14..f5ed2e01c582 100644
--- a/core/bin/selector_generator/src/main.rs
+++ b/core/bin/selector_generator/src/main.rs
@@ -1,105 +1,33 @@
-use std::{
-    collections::HashMap,
-    fs::{File, OpenOptions},
-    io::{self},
-};
-
+use app::App;
 use clap::Parser;
-use glob::glob;
-use serde::{Deserialize, Serialize};
-use sha3::{Digest, Keccak256};
-
-#[derive(Debug, Serialize, Deserialize)]
-struct ABIEntry {
-    #[serde(rename = "type")]
-    entry_type: String,
-    name: Option<String>,
-    inputs: Option<Vec<ABIInput>>,
-}
+pub(crate) mod app;
+pub(crate) mod selectors;
 
-#[derive(Debug, Serialize, Deserialize)]
-struct ABIInput {
-    #[serde(rename = "type")]
-    input_type: String,
-}
-
+/// Selector generator tool.
+///
+/// Generates a mapping of short (4-byte) function selectors to their corresponding function names.
+///
+/// The generated JSON can be used to look up function names by their selectors when interacting
+/// with Ethereum contracts.
 #[derive(Debug, Parser)]
-#[command(author, version, about, long_about = None)]
+#[command(author, version, about, long_about)]
 struct Cli {
+    /// Path to the directory with JSON files containing ABI.
+    /// All JSON files in this directory will be processed.
     contracts_dir: String,
+    /// Path to the output file.
+    /// The file will contain the list of function selectors.
+    /// If the file already exists, new selectors will be appended to it.
    output_file: String,
 }
 
-/// Computes solidity selector for a given method and arguments.
-fn compute_selector(name: &str, inputs: &[ABIInput]) -> String {
-    let signature = format!(
-        "{}({})",
-        name,
-        inputs
-            .iter()
-            .map(|i| i.input_type.clone())
-            .collect::<Vec<_>>()
-            .join(",")
-    );
-    let mut hasher = Keccak256::new();
-    hasher.update(signature);
-    format!("{:x}", hasher.finalize())[..8].to_string()
-}
-
-/// Analyses all the JSON files, looking for 'abi' entries, and then computing the selectors for them.
-fn process_files(directory: &str, output_file: &str) -> io::Result<()> {
-    let mut selectors: HashMap<String, String> = match File::open(output_file) {
-        Ok(file) => serde_json::from_reader(file).unwrap_or_default(),
-        Err(_) => HashMap::new(),
-    };
-    let selectors_before = selectors.len();
-    let mut analyzed_files = 0;
-
-    for entry in glob(&format!("{}/**/*.json", directory)).expect("Failed to read glob pattern") {
-        match entry {
-            Ok(path) => {
-                let file_path = path.clone();
-                let file = File::open(path)?;
-                let json: Result<serde_json::Value, _> = serde_json::from_reader(file);
-
-                if let Ok(json) = json {
-                    if let Some(abi) = json.get("abi").and_then(|v| v.as_array()) {
-                        analyzed_files += 1;
-                        for item in abi {
-                            let entry: ABIEntry = serde_json::from_value(item.clone()).unwrap();
-                            if entry.entry_type == "function" {
-                                if let (Some(name), Some(inputs)) = (entry.name, entry.inputs) {
-                                    let selector = compute_selector(&name, &inputs);
-                                    selectors.entry(selector).or_insert(name);
-                                }
-                            }
-                        }
-                    }
-                } else {
-                    eprintln!("Error parsing file: {:?} - ignoring.", file_path)
-                }
-            }
-            Err(e) => eprintln!("Error reading file: {:?}", e),
-        }
-    }
-    println!(
-        "Analyzed {} files. Added {} selectors (before: {} after: {})",
-        analyzed_files,
-        selectors.len() - selectors_before,
-        selectors_before,
-        selectors.len()
-    );
-
-    let file = OpenOptions::new()
-        .write(true)
-        .create(true)
-        .truncate(true)
-        .open(output_file)?;
-    serde_json::to_writer_pretty(file, &selectors)?;
-    Ok(())
-}
-
-fn main() -> io::Result<()> {
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
     let args = Cli::parse();
-    process_files(&args.contracts_dir, &args.output_file)
+    let mut app = App::load(args.output_file).await?;
+    app.process_files(&args.contracts_dir).await?;
+    app.report();
+    app.save().await?;
+    Ok(())
 }
diff --git a/core/bin/selector_generator/src/selectors.rs b/core/bin/selector_generator/src/selectors.rs
new file mode 100644
index 000000000000..3b69854a9478
--- /dev/null
+++ b/core/bin/selector_generator/src/selectors.rs
@@ -0,0 +1,118 @@
+use std::{collections::HashMap, path::PathBuf};
+
+use anyhow::Context;
+use serde::{Deserialize, Serialize};
+
+/// Short (4-byte) function selector.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)]
+#[serde(transparent)]
+struct Selector(String);
+
+/// Function name without parameters.
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+#[serde(transparent)]
+struct FunctionName(String);
+
+/// A set of function selectors and their corresponding function names.
+#[derive(Debug, Default, Serialize, Deserialize)]
+pub(crate) struct Selectors {
+    #[serde(flatten)]
+    selectors: HashMap<Selector, FunctionName>,
+}
+
+impl Selectors {
+    /// Loads the selectors from the file, or returns a new instance if the file is valid
+    /// JSON but doesn't contain an `abi` section.
+    ///
+    /// Will return an error if the file doesn't exist or cannot be deserialized.
+    pub async fn load(file_path: &PathBuf) -> anyhow::Result<Self> {
+        let file = tokio::fs::read(file_path)
+            .await
+            .context("Failed to read file")?;
+        let json: serde_json::Value =
+            serde_json::from_slice(&file).context("Failed to deserialize file")?;
+        let Some(abi) = json.get("abi").cloned() else {
+            return Ok(Selectors::default());
+        };
+
+        let contract: ethabi::Contract =
+            serde_json::from_value(abi).context("Failed to parse abi")?;
+        Ok(Self::new(contract))
+    }
+
+    /// Loads selectors from a given contract.
+ pub fn new(contract: ethabi::Contract) -> Self { + let selectors: HashMap<_, _> = contract + .functions + .into_values() + .flatten() + .map(|function| { + let selector = hex::encode(function.short_signature()); + (Selector(selector), FunctionName(function.name)) + }) + .collect(); + Self { selectors } + } + + /// Merges new selectors into the existing set. + pub fn merge(&mut self, new: Self) { + for (selector, name) in new.selectors { + self.selectors + .entry(selector.clone()) + .and_modify(|e| { + assert_eq!( + e, &name, + "Function name mismatch for selector '{:?}'", + selector + ) + }) + .or_insert(name); + } + } + + pub fn len(&self) -> usize { + self.selectors.len() + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_selectors() { + let contract_json = r#"[ + { + "type": "function", + "name": "transfer", + "inputs": [ + { "name": "to", "type": "address" }, + { "name": "value", "type": "uint256" } + ], + "outputs": [], + "stateMutability": "nonpayable" + }, + { + "type": "function", + "name": "bar", + "inputs": [], + "outputs": [], + "stateMutability": "nonpayable" + } + ] + "#; + + let contract: ethabi::Contract = serde_json::from_str(contract_json).unwrap(); + let selectors = Selectors::new(contract); + assert_eq!(selectors.len(), 2); + + // Check the generated selectors. + assert_eq!( + selectors + .selectors + .get(&Selector("a9059cbb".to_string())) + .expect("No selector for transfer found"), + &FunctionName("transfer".to_string()) + ); + } +} diff --git a/etc/selector-generator-data/selectors.json b/etc/selector-generator-data/selectors.json index 6ea986e4263a..f8a6d2e825b2 100644 --- a/etc/selector-generator-data/selectors.json +++ b/etc/selector-generator-data/selectors.json @@ -1,518 +1,1025 @@ { - "d0f2c663": "getBatchNumberAndTimestamp", - "2a79c611": "getCommitment", - "8129fc1c": "initialize", - "e2a9d554": "setUpgradeTimestamp", - "84c2ff75": "stmAssetId", - "7ac3a553": "withdrawLegacyBridge", - "e91659ae": "addNewChainIfNeeded", - "06d49e5b": "getPubdataPricingMode", - "1ff5a783": "execute", - "8310f2c6": "transferFundsFromSharedBridge", - "80b41246": "getBlockHashEVM", - "7da01cd6": "executeUpgrade", - "74044673": "addStateTransitionManager", - "82b57749": "forwardedBridgeMint", - "6478d8ed": "chainAdmin", - "4af63f02": "deploy", - "d0707b67": "aggregate", - "e0ab6368": "assetIdIsRegistered", - "27e86d6e": "getLastBlockHash", - "13bc9f20": "isOperationReady", - "4a2e35ba": "withdraw", - "1e4fba05": "getChainRoot", - "762008c2": "executeBatchesSharedBridge", - "155fd27a": "setValueUnderNonce", - "a6ae0aac": "coinbase", - "86d516e8": "getCurrentBlockGasLimit", - "3659cfe6": "upgradeTo", - "fa8f7ea6": "getAllHyperchains", - "7b510fe8": "getAccountInfo", - "40c10f19": "mint", - "e02e1bfd": "chainCount", - "015f58d7": "genesisUpgrade", - "28e439f3": "tryBlockAndAggregate", - "e76db865": "setPubdataPricingMode", - "62f84b24": "sendToL1", - "1c9f0149": "updateChainBalancesFromSharedBridge", - "38720778": "sharedBridge", - "64e130cf": "nativeTokenVault", - "adfca15e": "facetFunctionSelectors", - "af500fb7": "readBytes32", - "7b315630": "upgradeChainFromVersion", - "b6ea1757": "pushNewLeaf", - "e66c8c44": "validatorTimelock", + "e341eaa4": "sign", + "600903ad": "keyExistsToml", + "174dea71": "aggregate3Value", + "21ed2977": "assertApproxEqRelDecimal", + "90c5013b": "stopPrank", + "e4948f43": "proveL2MessageInclusion", "4f1ef286": "upgradeToAndCall", - "fe26699e": "getTotalBlocksCommitted", - "805b9869": "executeTransactionFromOutside", - "aa4593dc": 
"revertReceive", - "64b554ad": "forwardedBridgeBurn", - "ba238947": "getProtocolVersion", - "07f8c636": "multicall", - "39607382": "getTotalBlocksExecuted", - "796b89b9": "getBlockTimestamp", - "9cd939e4": "l2LogsRootHash", - "b298e36b": "push", + "e985e9c5": "isApprovedForAll", + "6ba3ba2b": "createFork", + "23b872dd": "transferFrom", + "740211ce": "commitValidatorCommittee", + "b292f5f1": "proveL1ToL2TransactionStatus", + "dbfe3e96": "updateSecurityCouncil", + "b12fc005": "assertLt", + "868085b1": "getBatchProofPublicInput", + "64d62353": "updateDelay", + "48c3241f": "closeFile", + "60f9bb11": "readFile", + "c88a5e6d": "deal", + "6f497ac6": "executeBatchesSharedBridge", + "966c523e": "blockAndAggregate", + "c438a9f2": "L2_LEGACY_SHARED_BRIDGE", + "59890bcb": "setExecutedBatches", + "e30c3978": "pendingOwner", + "31ba3498": "createFork", + "669efca7": "assertNotEqDecimal", + "c75ac8fa": "processL2Logs", + "ddc2651b": "envBytes", + "4de2e468": "getRawCodeHash", + "129de7eb": "blobhashes", + "b3e47705": "envOr", + "e543e5bf": "setChainCreationParams", + "8466f415": "assertLe", + "cc3fbc63": "setEraPostDiamondUpgradeFirstBatch", + "be6f11cf": "setPriorityTxMaxGasLimit", + "8da5cb5b": "owner", + "9f5684a2": "readLink", + "994057ef": "changeAttesterKey", + "711043ac": "assertEq", + "5875da2b": "changeValidatorKey", + "5139839c": "nodeOwners", + "7b048ccd": "parseJsonInt", + "a37dc1d4": "forwardedBridgeClaimFailedBurn", + "adfca15e": "facetFunctionSelectors", + "3e64a696": "getBasefee", + "99624cb6": "getAttesterCommittee", + "1497876c": "readDir", + "d48bfca7": "addToken", + "17d7de7c": "getName", + "eeb8cb09": "executeTransactionFromOutside", + "0c56efe9": "initializeV2", + "01eae183": "depositAmount", + "561cd6f3": "serializeString", + "b7909320": "assertNotEq", + "41c841c3": "L1_WETH_TOKEN", + "7475e9ea": "chainAdminAcceptAdmin", + "ca669fa7": "prank", + "c1899c1d": "createBatchCommitment", + "38a78092": "increaseMinNonce", + "a9059cbb": "transfer", + "f54266a2": "l1TokenAddress", + "e0eb04d4": "isFile", + "e9f18c17": "forceDeployOnAddresses", + "3d1fe08a": "assertGeDecimal", + "a2b1a1ae": "expectCall", + "5d83b6da": "__DEPRECATED_baseToken", "7890e5da": "side", - "5e1ac65f": "hashOperation", - "1806aa18": "getCodeSize", - "d4a4ca0d": "getBlockNumberAndTimestamp", - "06bed036": "setL2Block", - "aa970773": "validateAndPayForPaymasterTransaction", - "6223258e": "setDAValidatorPair", + "40f0b4e0": "assertLtDecimal", + "71aad10d": "toString", + "29f172ad": "unsafeOverrideBatch", + "8289e621": "assertApproxEqAbs", + "eccd2437": "assertGtDecimal", + "cdf25430": "L1_ASSET_ROUTER", + "a22cb465": "setApprovalForAll", + "9983c28a": "parseJsonIntArray", + "f67a965b": "broadcast", + "aa5cf788": "assertLeDecimal", + "8775a591": "assertNotEq", + "57f3921f": "stmAssetIdToAddress", + "2f2fd63f": "getMappingLength", + "2c431917": "scheduleTransparent", + "7b574586": "publishedBlobCommitments", + "31d50750": "isOperation", + "21f603d7": "setTransactionFilterer", + "8129fc1c": "initialize", "728cb93b": "bridgeClaimFailedBurn", - "d6abe642": "getAssetId", - "d2ef1b0e": "storedBatchZero", - "51b3c157": "hyperbridgingEnabled", - "53e61bdc": "processL2RollupDAValidatorOutputHash", - "95d89b41": "symbol", - "a37dc1d4": "forwardedBridgeClaimFailedBurn", - "db1f0bf9": "getTotalBatchesCommitted", - "beda594a": "setHyperchain", + "19698bc9": "infiniteFunction", + "3ce969e6": "revokePersistent", + "a322c40e": "toString", + "c29f093f": "setSTM", + "fd921be8": "parseJsonBytes", + "0d4aae9b": "stopMappingRecording", + 
"e03e9177": "assertEq", + "0a30b771": "assertGe", + "5e1ac65f": "hashOperation", + "9a188371": "requestL2TransactionDirect", + "62c6f9fb": "assertNotEq", + "b8c2f66f": "getTotalBatchesExecuted", + "42181150": "envInt", + "d17d4b0d": "assertLe", + "6ab8f82e": "proveL2LogInclusion", + "580d6bff": "updateAllNodesAtHeight", + "c3bbd2d7": "isFacetFreezable", + "8a75bb09": "saveL2LogsRootHash", + "71dce7da": "toString", + "1091a261": "assertNotEq", + "6ee1dc20": "validateNonceUsage", + "65bc9481": "accesses", + "f088ccdc": "callCodeOracle", + "0956441b": "stopExpectSafeMemory", + "5aa6fa1f": "NATIVE_TOKEN_VAULT", + "8310f2c6": "transferFundsFromSharedBridge", + "484f0505": "getHyperchainLegacy", "3977d71c": "getAggregatedRoot", - "c4d252f5": "cancel", - "2878fe74": "genesisUpgrade", - "2ab0f529": "isOperationDone", - "5d4edca7": "BRIDGE_HUB", - "d4b9f4fa": "messageRoot", - "fb1a9a57": "getDeploymentNonce", - "bb0fd610": "extendedAccountVersion", - "3cda3351": "create2", - "3a9d7f8d": "stmDeployer", - "db541184": "setShouldRevertOnExecuteBatches", - "74f4f547": "bridgeBurn", - "b852ad36": "l1SharedBridge", - "6ef25c3a": "baseFee", - "eb39e6d5": "stateTransitionManager", - "381c3f13": "checkDA", + "0008efda": "runDefaultUpgrader", + "328ef4fe": "setBaseTokenGasMultiplierPrice", + "ad31b9fa": "envAddress", + "65c428e7": "parseTomlAddressArray", + "bf529569": "setFreezability", + "ef3f0bae": "getTotalBatchesVerified", + "ce8365f9": "envExists", + "30bda03e": "setL1Erc20Bridge", + "cdc4878b": "nodeCount", + "015f58d7": "genesisUpgrade", + "a84328dd": "assertGe", + "1cc5d103": "setPorterAvailability", + "588570a5": "initialize", + "ae00b630": "runDeployConsensusRegistry", + "a31ee5b0": "initialize", + "9a7fbd8f": "assertNotEq", + "e23d2563": "getEraChainId", + "24fd57fb": "requestL2TransactionTwoBridges", + "0cc9ee84": "assertEq", + "f56ff18b": "getBlobhashes", "f92ad219": "initialize", - "9fa8826b": "depositHappened", - "01d23d4b": "diamondCut", - "55d35d18": "getValueUnderNonce", - "ee7fb38b": "calculateRoot", - "64d62353": "updateDelay", - "fd3c6b55": "processCalldataDA", - "39b34c6e": "requestBytecodeL1Publication", + "f2830f7b": "rollFork", + "2f90b184": "L1_CHAIN_ID", + "5de097b1": "nullifyChainBalanceByNTV", + "14e75680": "assertNotEqDecimal", + "204e1c7a": "getProxyImplementation", + "240f839d": "assertApproxEqAbs", "71623274": "l2TransactionBaseCost", - "53b9e632": "assetHandlerAddress", - "c987336c": "upgrade", - "5c975abb": "paused", - "4623c91d": "setValidator", - "4f1e1be0": "storeAccountConstructingCodeHash", - "b0f40a17": "processBatch", - "2c4f2a58": "bridgehubDepositBaseToken", - "ced531eb": "setHashes", - "18160ddd": "totalSupply", - "7cb9357e": "gasPerPubdataByte", - "7877a797": "blockGasLimit", - "cdc4878b": "nodeCount", - "c2eeeebd": "l1Address", - "0f23da43": "revertBatchesSharedBridge", - "e1239cd8": "incrementMinNonceIfEquals", - "8456cb59": "pause", - "9a42c2c2": "zeroPointerTest", - "f9f3ee2d": "setResult", - "7ba8be34": "decodeUint8", + "d9a3c4d2": "assertGt", + "e23cd19f": "writeJson", + "a0803ef7": "currentBlockInfo", + "aa4593dc": "revertReceive", + "d0468156": "getPendingAdmin", + "3e9173c5": "assertEq", + "a6368557": "deleteSnapshot", + "f413f0b6": "assertEq", + "e03fe177": "getCodeHash", + "812a44b2": "parseTomlKeys", + "80b41246": "getBlockHashEVM", + "1f7b4f30": "roll", + "c87b56dd": "tokenURI", + "dc8e4b26": "registerSettlementLayer", + "949431dc": "approvalBased", + "35e1349b": "eth_getLogs", + "5d4edca7": "BRIDGE_HUB", + "607457f2": 
"setShouldRevertOnCommitBatches", + "18717dc1": "setPorterAvailability", + "ede25608": "protocolVersionToUpgradeTimestamp", + "9ebf6827": "selectFork", + "84d9fedd": "popFront", + "06d49e5b": "getPubdataPricingMode", + "b3a056d7": "loadAllocs", + "49a7cc72": "payForTransaction", + "b381724e": "setFeeParams", + "d30dced6": "parseTomlBool", + "72c7e0b5": "assertNotEq", + "6223258e": "setDAValidatorPair", + "f90eb963": "getPorterAvailability", + "3f58f5b5": "createNewChain", + "c126e860": "hashOperation", + "491cc7c2": "expectEmit", + "7eff275e": "changeProxyAdmin", + "252dba42": "aggregate", + "01ffc9a7": "supportsInterface", + "a5748aad": "getNonce", + "69c76df2": "readUint32", + "d0bf6fd4": "setSharedBridge", + "dbe8d88b": "assertLtDecimal", + "3d1f16d4": "commitAttesterCommittee", + "48016c04": "assertEqDecimal", + "8dd14802": "setBridge", + "923b3b56": "forceDeployOnAddress", + "def9d6af": "protocolVersionIsActive", + "714a2f13": "assertEq", + "cf1c049c": "assertEq", + "087e6e81": "parseBytes32", + "6d016688": "expectSafeMemory", + "aa970773": "validateAndPayForPaymasterTransaction", "a635f01d": "delegateCall", - "2f90b184": "L1_CHAIN_ID", - "6c0960f9": "finalizeEthWithdrawal", - "31d50750": "isOperation", - "59ec65a2": "baseToken", - "a9b0d128": "setPriorityTreeStartIndex", - "c4879440": "bridgehubDepositBaseToken", - "823f1d96": "l2TokenProxyBytecodeHash", - "18876a04": "chunkPubdataToBlobs", - "699b0fb9": "bridgeBurn", - "17338945": "unfreezeDiamond", - "8a75bb09": "saveL2LogsRootHash", - "91b19874": "validators", - "63dc94b1": "forceDeploy", - "5a590335": "getDAValidatorPair", - "60144197": "setTokenMultiplierSetter", - "938b5f32": "origin", - "36ba0355": "bridgeMint", - "6dde7209": "l2TokenBeacon", - "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", - "7e44bc5e": "setImmutables", - "8e8acf87": "getL2BlockNumberAndTimestamp", - "e30c3978": "pendingOwner", + "2f103f22": "activeFork", + "8102d70d": "readDir", + "3cf78e28": "assertNotEq", + "97949042": "envBytes32", "f5e69a47": "publishCompressedBytecode", - "84da1fb4": "getNewAddressCreate2", - "47fcedb8": "setFeeParams", - "b22dd78e": "storedBatchHash", + "39509351": "increaseAllowance", + "97bb3ce9": "tokenAddress", + "f9f3ee2d": "setResult", + "ae65def1": "node", + "d124dc4f": "send", + "64b554ad": "forwardedBridgeBurn", + "f0259e92": "breakpoint", + "ebc73ab4": "getMappingSlotAt", + "0e18b681": "acceptAdmin", + "f3385fb6": "forceDeployOnAddress", + "667f9d70": "load", + "8f5d232d": "parseBytes", + "515361f6": "assertEq", + "6d315d7e": "blobBaseFee", + "4724c5b9": "assertNotEq", + "3425eb89": "tokenMultiplierSetter", + "2d0335ab": "getNonce", + "07ee9355": "l2BridgeAddress", + "189a5a17": "nodes", + "9a8a0592": "chainId", + "1d9e269e": "makePersistent", + "3d5bc8bc": "assertApproxEqAbsDecimal", + "701f58c5": "commitBatches", + "7877a797": "blockGasLimit", + "e6962cdb": "broadcast", + "5aa9b6b5": "getRawNonce", + "876e24e6": "getMappingKeyAndParentOf", + "b67187f3": "assertNotEq", + "933999fb": "deposit", + "4074e0a8": "makePersistent", + "0dbad27e": "upgradeChainFromVersion", + "3601e63e": "bridgeRecoverFailedTransfer", + "f710b062": "assertApproxEqAbs", + "e516761e": "markFactoryDeps", "57e6246b": "initialCutHash", - "2b805192": "setNewVersionUpgrade", - "dbfe3e96": "updateSecurityCouncil", - "e03fe177": "getCodeHash", - "02fa5779": "setNewBatch", - "a225efcb": "setPubdataInfo", - "9cc395d0": "bridgeCheckCounterpartAddress", - "868085b1": "getBatchProofPublicInput", - "6a0cd1f5": "removeValidator", - "2ae9c600": "protocolVersion", - 
"61f91b2e": "initialForceDeploymentHash", - "72425d9d": "getCurrentBlockDifficulty", - "8c2a993e": "bridgeMint", - "b473318e": "l2TransactionBaseCost", - "f851a440": "admin", - "681fe70c": "isEmpty", - "ef3f0bae": "getTotalBatchesVerified", + "c0406226": "run", + "7b30c8da": "getL2SystemContractsUpgradeTxHash", + "74637a7a": "computeCreateAddress", + "ba334825": "hyperchain", + "ca408c23": "bridgehubDeposit", + "6352211e": "ownerOf", + "9f86dc91": "parseJsonBool", "ba75bbd8": "front", - "cdffacc6": "facetAddress", - "89f9a072": "validatePubdata", - "66869d49": "changeFeeParams", - "e8b99b1b": "deposit", - "4d4a1eca": "setTokenMultiplier", - "a0803ef7": "currentBlockInfo", - "fb4baf17": "changeFeeParams", - "3591c1a0": "getBridgehub", - "fd791f3c": "getL2DefaultAccountBytecodeHash", - "ec8067c7": "updateNonceOrdering", - "a3912ec8": "receiveEther", - "79823c9a": "getFirstUnprocessedPriorityTx", - "235d9eb5": "setTokenMultiplier", - "dd354a06": "calculateCreate2TokenAddress", - "7efda2ae": "proveL2LeafInclusion", - "f120e6c4": "encodeTxDataHash", - "f5f15168": "l2TokenAddress", - "4d2301cc": "getEthBalance", - "ab07b2e9": "getL2GasPrice", - "363bf964": "setAddresses", - "607457f2": "setShouldRevertOnCommitBatches", - "d1ba7e97": "hyperchainAddress", - "841a9d42": "aggregate3Value", + "b473318e": "l2TransactionBaseCost", + "c304aab7": "assertLeDecimal", + "64bf8d66": "changeFeeParams", + "f4844814": "expectRevert", + "2878fe74": "genesisUpgrade", + "89160467": "ffi", + "27af7d9c": "assertEqDecimal", + "ebe4a3d7": "getTransactionHashes", + "7fec2a8d": "startBroadcast", + "d505accf": "permit", + "09824a80": "registerToken", + "c21a38e2": "proveL2MessageInclusion", + "c31eb0e0": "expectRevert", + "561fe540": "envOr", + "f8d33b9b": "assertGt", "ea6c029c": "baseTokenGasPriceMultiplierNominator", - "de8fa431": "getSize", - "24a55db9": "markBytecodeAsPublished", - "c438a9f2": "L2_LEGACY_SHARED_BRIDGE", - "ddeaa8e6": "getBatchHash", - "8f31f052": "isWithdrawalFinalized", - "41cf49bb": "prepareChainCommitment", - "5d382700": "create2Account", - "6d9860e1": "l1AssetRouter", + "ef277d72": "assertApproxEqRel", + "c846f6df": "transferFundsFromLegacy", + "7ca29682": "createFork", + "5ca1e165": "getRoot", + "37736e08": "parseToml", + "29b98c67": "isDiamondStorageFrozen", + "f0e9da23": "readAddress", + "1206c8a8": "rpc", + "f8ccbf47": "IS_SCRIPT", + "dd62ed3e": "allowance", + "56ca623e": "toString", + "f6370c7b": "setChainCreationParams", + "06447d56": "startPrank", + "05838bf4": "expectSafeMemoryCall", "e1ad1162": "transfer", - "bf1fe420": "setGasPrice", - "a1954fc5": "getTotalPriorityTxs", - "c0a16dda": "setAssetDeploymentTracker", - "4145ca27": "removePriorityQueueFront", - "09e14277": "setStateTransitionManager", - "1f067457": "revertTransfer", - "b8c2f66f": "getTotalBatchesExecuted", - "07ee9355": "l2BridgeAddress", - "095ea7b3": "approve", - "84b0196e": "eip712Domain", - "18b1771f": "getAssetId", - "f85894c5": "forwardedBridgeBurn", + "c7ca373c": "initFromCommitment", + "5a362d45": "assertGt", + "14b02bc9": "envString", + "f7d39a8d": "breakpoint", + "b2dad155": "trim", + "4ad0bac9": "readCallers", + "53ce2061": "revertBatches", "bd7c5412": "isEthWithdrawalFinalized", - "70a08231": "balanceOf", - "3425eb89": "tokenMultiplierSetter", - "5aa9b6b5": "getRawNonce", - "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", - "205c2878": "withdrawTo", - "ec3d5f88": "setPriorityTxMaxGasLimit", - "8eb7db57": "bridgehubConfirmL2Transaction", - "2a72b707": "bridgehubRequestL2Transaction", - "0f3fa211": 
"setNativeTokenVault", - "4bed8212": "isWithdrawalFinalized", - "0c56efe9": "initializeV2", - "501e60d5": "setUpgradeDiamondCut", - "c29f093f": "setSTM", - "f2fde38b": "transferOwnership", - "8c5a3445": "general", - "ca8f93f1": "setLegacyBaseTokenAssetId", - "71abd109": "upgrade", - "eced0bf0": "__DEPRECATED_tokenIsRegistered", - "dc8e4b26": "registerSettlementLayer", - "310ab089": "getImmutable", - "19cae462": "difficulty", - "77421056": "setFunctionToCall", - "3997d064": "tryAggregate", - "f1d357e5": "L1_SHARED_BRIDGE", - "952a3ee7": "getERC20Getters", - "29b98c67": "isDiamondStorageFrozen", - "17d7de7c": "getName", - "e81e0ba1": "isFunctionFreezable", - "7ebba672": "setTokenMultiplier", - "6ee1dc20": "validateNonceUsage", - "6a27e8b5": "getSettlementLayer", - "7a28adb2": "proveL2LogInclusion", - "671a7131": "settlementLayer", - "accdd16c": "freezeChain", - "c3bbd2d7": "isFacetFreezable", - "99a88ec4": "upgrade", + "a6ae0aac": "coinbase", + "ab07b2e9": "getL2GasPrice", + "e02e1bfd": "chainCount", + "d92d8efd": "isPersistent", + "79823c9a": "getFirstUnprocessedPriorityTx", + "78bdcea7": "assertNotEq", + "699b0fb9": "bridgeBurn", + "18e3a941": "getVerifierParams", + "ee7fb38b": "calculateRoot", + "f851a440": "admin", "95f11a40": "bridgeInitialize", - "c9f5c932": "requestL2TransactionTwoBridges", - "f1a78aa6": "postTransaction", - "ca65fe79": "finalizeDeposit", - "5518c73b": "getStateTransitionManager", - "b5b18fe5": "processL2Logs", - "969b53da": "l1Bridge", - "e8a71ca9": "forwardedBridgeMint", - "505e6d47": "updateAllLeaves", - "ecf95b8a": "createAccount", - "84d9fedd": "popFront", "3f4ba83a": "unpause", - "1f98fa08": "createNewChain", - "313ce567": "decimals", - "3ce695e7": "registerSTMAssetOnL1", - "73c58a2d": "publishBlobs", - "f0e9da23": "readAddress", - "e23d2563": "getEraChainId", - "0ec6b0b7": "getPriorityTxMaxGasLimit", - "fdbb0301": "__DEPRECATED_l2BridgeAddress", - "52d1902d": "proxiableUUID", - "97bb3ce9": "tokenAddress", - "5d83b6da": "__DEPRECATED_baseToken", - "966c523e": "blockAndAggregate", - "f4943a20": "protocolVersionDeadline", - "46746c7d": "commitBatchesSharedBridge", - "87d9d023": "verify", - "57f3921f": "stmAssetIdToAddress", - "e516761e": "markFactoryDeps", - "daa51a8c": "pushBack", - "2e1a7d4d": "withdraw", - "af6ed122": "executeUpgrade", - "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", - "01eae183": "depositAmount", - "9e8945d2": "verificationKeyHash", - "a3bd0112": "genesisUpgrade", - "927c4bf7": "upgradeExternal", - "56079ac8": "sendL2ToL1Log", - "d92f86a2": "setLegacyChainAddress", - "be6f11cf": "setPriorityTxMaxGasLimit", - "7321c485": "dummySetValidator", - "c0991525": "claimFailedDeposit", - "72d74cd7": "reinitializeToken", - "ab93d6f3": "requestL2TransactionToGatewayMailbox", - "3601e63e": "bridgeRecoverFailedTransfer", - "eb672419": "requestL2Transaction", - "af6a2dcd": "getTotalBlocksVerified", - "27eb6c0f": "securityCouncil", - "4c6314f0": "getMarker", - "49a7cc72": "payForTransaction", - "f20265d2": "setRevertTransfer", + "33ce93fe": "getProtocolVersion", + "d9caed12": "withdraw", + "c37533bb": "proveBatchesSharedBridge", + "07168226": "deployBeaconProxy", + "9cd939e4": "l2LogsRootHash", + "af500fb7": "readBytes32", + "db4235f6": "keyExistsJson", + "8c5a3445": "general", + "607e2cb2": "setRevertReceive", + "27ae4c16": "freezeDiamond", + "e0bf0850": "setShouldRevertOnProveBatches", + "8cb7f3d0": "forceDeployOnAddresses", + "47fcedb8": "setFeeParams", + "5d18c73a": "assertEq", + "efb77a75": "makePersistent", + "421ae469": "deleteSnapshots", + 
"39607382": "getTotalBlocksExecuted", + "d145736c": "envOr", + "76eadd36": "stopBroadcast", + "2f9c8f0d": "add", "84bc3eb0": "withdrawWithMessage", - "79c4f929": "markBytecodeAsPublished", - "580d6bff": "updateAllNodesAtHeight", + "a28c1aee": "prepareForPaymaster", + "150b7a02": "onERC721Received", + "45c62011": "removeDir", + "8cf2b2f0": "uncheckedInc", + "74da756b": "execute", + "1c50cfea": "addTokenAssetId", + "c2e4ff97": "markAccountCodeHashAsConstructed", + "e91659ae": "addNewChainIfNeeded", + "59ec65a2": "baseToken", + "350d56bf": "envAddress", + "4d8abc4b": "transact", + "2077337e": "assertLtDecimal", + "938b5f32": "origin", + "f2fde38b": "transferOwnership", + "0d4651aa": "storeAccountConstructedCodeHash", + "b993549e": "getCommittedBatchTimestamp", + "7c84c69b": "assertEq", + "70f5c679": "setMessageRoot", + "e1239cd8": "incrementMinNonceIfEquals", + "04a5c7ab": "assertGtDecimal", + "f20265d2": "setRevertTransfer", + "95d89b41": "symbol", + "11a2ccc1": "finalizeWithdrawal", + "65b7b7cc": "expectCall", + "1f21fc80": "writeFileBinary", + "a457c2d7": "decreaseAllowance", + "7fb5297f": "startBroadcast", + "9ff531e3": "assertLt", + "127cfe9a": "parseTomlBoolArray", + "2b805192": "setNewVersionUpgrade", + "d241f618": "genesisUpgrade", + "a3bd0112": "genesisUpgrade", + "39b34c6e": "requestBytecodeL1Publication", + "3f33db60": "serializeInt", + "d3977322": "assertNotEq", + "bdfacbe8": "assertNotEq", + "8bb75533": "split", + "7ed1ec7d": "envBool", + "d2ef1b0e": "storedBatchZero", + "edecd035": "assertNotEq", + "7ebba672": "setTokenMultiplier", + "9d2ad72a": "rpcUrlStructs", + "a6f2c076": "setDataToBeReturnedInFinalizeWithdrawal", + "f8f7cd76": "validateTransaction", + "4777f3cf": "envOr", + "505e6d47": "updateAllLeaves", + "8eb7db57": "bridgehubConfirmL2Transaction", + "2f2769d1": "assertEq", + "548a5a33": "setAssetHandlerAddressThisChain", + "8c2a993e": "bridgeMint", + "3e9705c0": "startMappingRecording", + "d1a5b36f": "pauseGasMetering", + "84d52b7a": "createSelectFork", + "6dde7209": "l2TokenBeacon", + "dead6f7f": "getHyperchain", + "bbcb713e": "envOr", + "c0a16dda": "setAssetDeploymentTracker", + "ca65fe79": "finalizeDeposit", + "579952fc": "transferFromTo", + "805b5b74": "tokenIsRegistered", + "709ecd3f": "dumpState", + "c657c718": "label", + "fb4baf17": "changeFeeParams", + "e8295588": "zeros", + "85940ef1": "parseJson", + "213e4198": "parseJsonKeys", + "3f8be2c8": "toBase64", + "16d207c6": "assertApproxEqAbs", + "f26f3c8f": "proveL2MessageInclusion", + "b88d4fde": "safeTransferFrom", + "1e4fba05": "getChainRoot", + "facd743b": "isValidator", + "2c4f2a58": "bridgehubDepositBaseToken", + "82d6c8fd": "assertApproxEqRelDecimal", + "85df51fd": "blockHash", + "897e0a97": "writeFile", + "263b7f8e": "proveL2LogInclusion", + "ab93d6f3": "requestL2TransactionToGatewayMailbox", + "f5ba4232": "removeStateTransitionManager", + "f85894c5": "forwardedBridgeBurn", + "584b153e": "isOperationPending", + "95570d12": "getValidatorCommittee", + "70ca10bb": "store", + "aaaddeaf": "envBool", + "d0707b67": "aggregate", + "ec3d5f88": "setPriorityTxMaxGasLimit", + "8456cb59": "pause", + "6cd8c355": "reinitializeChainGovernance", + "48f50c0f": "txGasPrice", + "d930a0e6": "projectRoot", + "628b636e": "publishPubdataAndClearState", + "b5a85e9d": "forceDeploy", + "6a82600a": "parseJson", + "a85a8418": "rpcUrls", "e5355c75": "getL2SystemContractsUpgradeBatchNumber", - "ca408c23": "bridgehubDeposit", - "6ab8f82e": "proveL2LogInclusion", - "7528c2c6": "applyL1ToL2Alias", - "59890bcb": "setExecutedBatches", + 
"9cb1c0d4": "prevrandao", + "6a8237b3": "assertNotEq", + "26782247": "pendingAdmin", + "66869d49": "changeFeeParams", + "762008c2": "executeBatchesSharedBridge", + "426cb766": "attestersCommit", + "88da6d35": "serializeString", + "1e356e1a": "serializeAddress", + "8a0807b7": "indexOf", + "566338a9": "getL1TokenAddress", + "2555d2c1": "chunkAndPublishPubdata", + "e2f318e3": "payForTransaction", + "72d74cd7": "reinitializeToken", + "3997d064": "tryAggregate", + "57180981": "updateAccountVersion", + "f28dceb3": "expectRevert", + "db1f0bf9": "getTotalBatchesCommitted", + "51b3c157": "hyperbridgingEnabled", + "4be99e1d": "getCurrentPubdataCost", + "e34a329a": "executeUpgrade", + "546b6d2a": "SHARED_BRIDGE", + "7ab08472": "finalizeWithdrawalLegacyErc20Bridge", + "6a27e8b5": "getSettlementLayer", + "17338945": "unfreezeDiamond", + "df9c1589": "executeTransaction", + "99c16d1a": "proveL2MessageInclusion", + "98acd7a6": "getBaseToken", + "f4c004e3": "assertNotEq", + "a54a87d8": "copyFile", + "c1fa1ed0": "assertEq", + "47b4a7a6": "changeAttesterWeight", + "be646da1": "transact", + "d1132332": "attesterPubKeyHashes", + "30e5ccbd": "incrementTxNumberInBatch", + "7958004c": "getOperationState", + "045c55ce": "assertApproxEqAbsDecimal", + "56079ac8": "sendL2ToL1Log", + "d86970d8": "getL2BootloaderBytecodeHash", + "e8a71ca9": "forwardedBridgeMint", + "9a42c2c2": "zeroPointerTest", + "06fdde03": "name", + "086a56f8": "getBaseTokenBridge", + "18b1771f": "getAssetId", + "306395c6": "incrementDeploymentNonce", + "63dc94b1": "forceDeploy", + "1c9f0149": "updateChainBalancesFromSharedBridge", + "3591c1a0": "getBridgehub", + "d6abe642": "getAssetId", + "95fd154e": "assertLe", + "72425d9d": "getCurrentBlockDifficulty", "b19f0ade": "executeUpgradeNoOverlap", - "15f9a2fe": "prepareForPaymaster", - "6e9d7899": "legacyBridge", - "ef0e2ff4": "setChainId", - "e52db4ca": "baseTokenAssetId", - "0f28c97d": "getCurrentBlockTimestamp", + "44d7f0a4": "revertTo", + "51cff8d9": "withdraw", + "817b17f0": "postTransaction", + "33949f0b": "assertNotEqDecimal", + "83eddd19": "governanceAcceptOwner", + "301e7765": "getChainAdmin", + "a8b0574e": "getCurrentBlockCoinbase", + "52c9eacb": "upgradeCutHash", + "9c4d535b": "create", + "e8de12df": "validatorsCommit", + "b11a19e8": "toString", + "6d9860e1": "l1AssetRouter", + "9ec3f927": "changeValidatorWeight", + "9507540e": "assertNotEq", + "c0865ba7": "writeToml", + "e81e0ba1": "isFunctionFreezable", + "49c4fac8": "parseJsonString", + "0b72f4ef": "assertNotEq", + "235d9eb5": "setTokenMultiplier", + "c3d93e7c": "executeBatches", + "4af63f02": "deploy", + "a0ed82fa": "governanceAcceptAdmin", + "60144197": "setTokenMultiplierSetter", + "7ba8be34": "decodeUint8", + "f5a55558": "assertNotEqDecimal", + "6631aa99": "parseJsonBytesArray", + "b2332f51": "assertNotEq", + "08e4e116": "expectCallMinGas", "d0e30db0": "deposit", - "9623609d": "upgradeAndCall", - "5ca1e165": "getRoot", - "fe173b97": "gasPrice", - "a851ae78": "setTxOrigin", - "18717dc1": "setPorterAvailability", - "cbcf2e3c": "isTransactionAllowed", + "fd791f3c": "getL2DefaultAccountBytecodeHash", + "b197c247": "parseTomlBytesArray", + "3e716f81": "parseTomlBytes32Array", + "74f4d30d": "storedBlockHash", + "2a72b707": "bridgehubRequestL2Transaction", + "7e44bc5e": "setImmutables", + "927c4bf7": "upgradeExternal", + "440ed10d": "expectEmit", + "b7b080ab": "transferTokenToSharedBridge", + "db541184": "setShouldRevertOnExecuteBatches", + "eff6b27d": "assertEq", + "fdbb0301": "__DEPRECATED_l2BridgeAddress", + "b873634c": "assertNotEq", + 
"7a675bb6": "createWallet", + "399542e9": "tryBlockAndAggregate", + "81bad6f3": "expectEmit", + "ae1f6aaf": "l2Bridge", + "46cc92d9": "difficulty", + "498fdcf4": "parseJsonStringArray", + "d3522ae6": "parseTomlIntArray", + "ae5a2ae8": "serializeUintToHex", + "23dc4a09": "keccakPerformUpgrade", + "07f8c636": "multicall", + "1dd93b33": "keccakValidationTest", + "fd3c6b55": "processCalldataDA", + "e6d9923b": "proveL2LogInclusion", + "c3077fa9": "blockAndAggregate", + "c6ce059d": "parseAddress", + "cf347e17": "setValidator", + "b22dd78e": "storedBatchHash", + "f21d52c7": "serializeBytes", + "975d5a12": "assertEq", + "7ac3a553": "withdrawLegacyBridge", "c4d66de8": "initialize", - "7c9bd1f3": "publishTimestampDataToL1", - "69c76df2": "readUint32", - "a75b496d": "getAllHyperchainChainIDs", - "f5ba4232": "removeStateTransitionManager", - "42cbb15c": "getBlockNumber", - "607e2cb2": "setRevertReceive", - "328ef4fe": "setBaseTokenGasMultiplierPrice", - "1c50cfea": "addTokenAssetId", - "6d1d8363": "scheduleShadow", - "9cc7f708": "balanceOf", - "933999fb": "deposit", - "c2e047ff": "aggregate3", - "bb7044b6": "stateTransitionManagerIsRegistered", - "d4ce08c2": "addNewChain", - "f34d1868": "setExecutionDelay", - "9caf9bac": "setX", + "d77bfdb9": "parseTomlBytes", + "7b315630": "upgradeChainFromVersion", + "168b64d3": "createDir", + "cdffacc6": "facetAddress", + "e0ab6368": "assetIdIsRegistered", + "e25242c0": "assertGe", + "d566afd3": "createBatchCommitment", + "afc98040": "broadcast", + "cc7b0487": "parseTomlUint", + "3fdf4e15": "clearMockedCalls", + "ee82ac5e": "getBlockHash", + "79c4f929": "markBytecodeAsPublished", + "e5d6bf02": "warp", + "03e0aca9": "revertToAndDelete", + "d83e4e03": "genesisUpgrade", + "4a2e35ba": "withdraw", + "ba238947": "getProtocolVersion", + "88b44c85": "assertEq", + "fee9a469": "serializeUint", + "09e14277": "setStateTransitionManager", + "c63c4e9b": "minDelay", + "95218ecd": "executeInstant", + "abbf21cc": "assertApproxEqRelDecimal", + "addde2b6": "parseJsonUint", + "a1a7cddb": "runDeploySharedBridge", + "3644e515": "DOMAIN_SEPARATOR", + "6a5066d4": "assertApproxEqAbsDecimal", + "6edd4f12": "commitBatchesSharedBridge", + "78611f0e": "assertGtDecimal", "f113c88b": "createNewChain", - "1cc5d103": "setPorterAvailability", - "cdf25430": "L1_ASSET_ROUTER", - "def9d6af": "protocolVersionIsActive", - "c21a38e2": "proveL2MessageInclusion", - "e543e5bf": "setChainCreationParams", - "4be99e1d": "getCurrentPubdataCost", - "74f4d30d": "storedBlockHash", - "f8f7cd76": "validateTransaction", - "7a0ed627": "facets", - "38a78092": "increaseMinNonce", - "8cb7f3d0": "forceDeployOnAddresses", - "a2d5a0cc": "proveBatchesSharedBridge", - "301e7765": "getChainAdmin", - "fb644fc5": "addChainBatchRoot", + "f3dec099": "envUint", + "4700d74b": "envOr", + "ed7c5462": "createWallet", + "7676e127": "serializeInt", + "b6ea1757": "pushNewLeaf", + "b2ded522": "initialize", + "b298e36b": "push", + "c987336c": "upgrade", + "3cda3351": "create2", + "dd85df2d": "setEraLegacyBridgeLastDepositTime", + "32c8176d": "deriveKey", + "e9420f8c": "whitelistedSettlementLayers", + "e24fed00": "assertEq", + "d323826a": "computeCreate2Address", "6006d8b5": "verifyCompressedStateDiffs", - "39509351": "increaseAllowance", - "51cff8d9": "withdraw", - "8ffe1b81": "setBridgeHubAddress", - "95ce3e93": "decodeString", - "09824a80": "registerToken", - "d86970d8": "getL2BootloaderBytecodeHash", - "a31ee5b0": "initialize", - "0d4651aa": "storeAccountConstructedCodeHash", - "9a188371": "requestL2TransactionDirect", - "ed1d7d97": 
"chainIndexToId", - "c63c4e9b": "minDelay", - "546b6d2a": "SHARED_BRIDGE", - "187598a5": "getNewAddressCreate", - "bf529569": "setFreezability", + "1e279d41": "promptSecret", + "0d14edf7": "registerAlreadyDeployedHyperchain", + "5d382700": "create2Account", + "38720778": "sharedBridge", + "4d2301cc": "getEthBalance", + "11d1364a": "assertLeDecimal", + "1ff5a783": "execute", + "5c975abb": "paused", + "cf22e3c9": "startStateDiffRecording", + "64bc3e64": "envOr", + "55d35d18": "getValueUnderNonce", + "592151f0": "parseToml", + "8fbb3711": "claimFailedDepositLegacyErc20Bridge", + "53e61bdc": "processL2RollupDAValidatorOutputHash", + "41cf49bb": "prepareChainCommitment", + "86d516e8": "getCurrentBlockGasLimit", "cfe7af7c": "finalizeDeposit", - "bcf284e5": "executeTransaction", - "3437949a": "l1GenesisUpgrade", - "f54266a2": "l1TokenAddress", + "3ce695e7": "registerSTMAssetOnL1", + "b71bcf90": "reinitializeToken", + "db07fcd2": "assertGt", + "969b53da": "l1Bridge", + "e717bab7": "proveL1ToL2TransactionStatusViaGateway", + "1f067457": "revertTransfer", + "eb85e83b": "envOr", + "19cae462": "difficulty", + "7a1d8d3a": "safeTransferFundsFromLegacy", + "619d897f": "writeLine", + "5518c73b": "getStateTransitionManager", + "ca8f93f1": "setLegacyBaseTokenAssetId", + "eced0bf0": "__DEPRECATED_tokenIsRegistered", + "51d218f7": "unfreezeChain", + "4c6314f0": "getMarker", + "b760faf9": "depositTo", + "a1954fc5": "getTotalPriorityTxs", + "f120e6c4": "encodeTxDataHash", + "a2d5a0cc": "proveBatchesSharedBridge", + "8cf25ef4": "assertApproxEqRel", + "b8776d4d": "chainRegistered", + "7528c2c6": "applyL1ToL2Alias", + "91c75bc3": "parseJsonBytes32Array", + "22100064": "rememberKey", + "46657fe9": "getVerifier", + "e8b99b1b": "deposit", + "f1a78aa6": "postTransaction", + "a888cc3a": "bridgehubRequestL2TransactionOnGateway", + "2bcd50e0": "resumeGasMetering", + "4db19e7e": "assertEq", + "af6ed122": "executeUpgrade", + "47e50cce": "prank", + "03c5d8af": "forwardTransactionOnGateway", + "d23cd037": "mockCallRevert", + "16ed7bc4": "readFileBinary", + "28e439f3": "tryBlockAndAggregate", + "ced531eb": "setHashes", + "038a24bc": "validateAndPayForPaymasterTransaction", + "4cc5b15e": "diamondCut", + "27e86d6e": "getLastBlockHash", + "ae3165b3": "toBase64URL", + "ce817d47": "startBroadcast", + "823f1d96": "l2TokenProxyBytecodeHash", + "4b561753": "addValidator", + "d9bbf3a1": "rollFork", + "6fadcf72": "forward", + "bd6af434": "expectCall", + "4dd18bf5": "setPendingAdmin", + "84c2ff75": "stmAssetId", + "681fe70c": "isEmpty", + "fe26699e": "getTotalBlocksCommitted", + "a75b496d": "getAllHyperchainChainIDs", + "35d6ad46": "writeJson", + "40c10f19": "mint", + "06e7517b": "appendTransactionToCurrentL2Block", + "b852ad36": "l1SharedBridge", + "3ea053eb": "deactivate", + "e02da327": "readUint256", + "3ebf73b4": "getDeployedCode", + "86b7f856": "publishPubdataAndClearState", + "4145ca27": "removePriorityQueueFront", + "ef0e2ff4": "setChainId", + "68c09202": "executeUpgradeNoOverlap", + "402efc91": "stateTransitionManager", "c9d1c097": "stmAssetIdFromChainId", - "39d7d4aa": "getPriorityTreeRoot", - "41c841c3": "L1_WETH_TOKEN", - "19fa7f62": "claimFailedDeposit", - "5c60da1b": "implementation", - "dd62ed3e": "allowance", - "9cd45184": "chainBalance", - "7958004c": "getOperationState", - "8cf2b2f0": "uncheckedInc", - "715018a6": "renounceOwnership", - "30bda03e": "setL1Erc20Bridge", + "315fff4e": "THIS_ADDRESS", + "3558c188": "executeBatches", "c0d5b949": "getCurrentPubdataSpent", - "4de2e468": "getRawCodeHash", - "7ecebe00": 
"nonces", - "0e18b681": "acceptAdmin", - "d0468156": "getPendingAdmin", - "d83e4e03": "genesisUpgrade", - "49eb3b50": "getTransactionHashes", - "ebf0c717": "root", - "8da5cb5b": "owner", - "11a2ccc1": "finalizeWithdrawal", - "1dd93b33": "keccakValidationTest", - "f088ccdc": "callCodeOracle", - "aad74262": "setProtocolVersionDeadline", - "72c84445": "callKeccak", - "21f603d7": "setTransactionFilterer", - "52ef6b2c": "facetAddresses", - "9e6ea417": "depositLegacyErc20Bridge", + "3d5923ee": "setEnv", + "57e22dde": "makePersistent", + "2b589b28": "lastCallGas", + "896909dc": "getMinNonce", + "dbaad147": "mockCallRevert", + "3437949a": "l1GenesisUpgrade", + "c4d252f5": "cancel", + "dd82d13e": "skip", "960dcf24": "getBaseTokenAssetId", - "a888cc3a": "bridgehubRequestL2TransactionOnGateway", - "c7ca373c": "initFromCommitment", - "548a5a33": "setAssetHandlerAddressThisChain", - "402efc91": "stateTransitionManager", - "7b30c8da": "getL2SystemContractsUpgradeTxHash", - "0ef26743": "height", - "79ba5097": "acceptOwnership", - "584b153e": "isOperationPending", - "06fdde03": "name", - "e717bab7": "proveL1ToL2TransactionStatusViaGateway", - "a8b0574e": "getCurrentBlockCoinbase", - "30e5ccbd": "incrementTxNumberInBatch", - "ef011dff": "ERA_CHAIN_ID", - "f8c1f0d2": "upgradeChainFromVersion", + "1777e59d": "parseJsonBytes32", + "2e522851": "setNewVersionUpgrade", + "d4ce08c2": "addNewChain", + "c74e9deb": "envOr", + "f320d963": "assertEq", + "a5277a02": "initialize", + "9366518b": "createNewChain", + "7e77b0c5": "assertEqDecimal", + "29233b1f": "deriveKey", + "dc28c0f1": "assertGeDecimal", + "975a6ce9": "rpcUrl", + "65e7c844": "parseTomlAddress", + "e13a1834": "expectCallMinGas", + "4d7baf06": "envBytes", + "5c60da1b": "implementation", + "0f3fa211": "setNativeTokenVault", + "46746c7d": "commitBatchesSharedBridge", + "cbcf2e3c": "isTransactionAllowed", + "bcf284e5": "executeTransaction", + "a8d4d1d9": "assertGe", + "7a28adb2": "proveL2LogInclusion", + "972c6062": "serializeAddress", + "b4866c43": "setFeeParams", + "08dc3360": "validatorPubKeyHashes", + "e66c8c44": "validatorTimelock", + "d74c83a4": "rollFork", + "2ab0f529": "isOperationDone", + "187598a5": "getNewAddressCreate", "f3b7dead": "getProxyAdmin", - "f26f3c8f": "proveL2MessageInclusion", - "3558c188": "executeBatches", - "bcd1b23d": "updateFullTree", + "952a3ee7": "getERC20Getters", + "0f23da43": "revertBatchesSharedBridge", + "87d9d023": "verify", + "0ec6b0b7": "getPriorityTxMaxGasLimit", + "b25c5a25": "sign", + "3408e470": "getChainId", + "707df785": "assertEq", + "7a0ed627": "facets", + "85e4e16a": "assetDeploymentTracker", + "d0f2c663": "getBatchNumberAndTimestamp", + "01d23d4b": "diamondCut", "3a3f36f9": "codeOracleTest", - "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "42842e0e": "safeTransferFrom", + "28a249b0": "getLabel", + "625387dc": "unixTime", + "997a0222": "revokePersistent", + "6478d8ed": "chainAdmin", + "823447c8": "setResult", + "47eaf474": "prompt", + "d4b9f4fa": "messageRoot", + "e76db865": "setPubdataPricingMode", + "f8e18b57": "setNonce", + "d4a4ca0d": "getBlockNumberAndTimestamp", + "6bcb2c1b": "deriveKey", + "89f9a072": "validatePubdata", + "6a0cd1f5": "removeValidator", + "fe173b97": "gasPrice", + "2fce7883": "parseJsonAddressArray", + "1f98fa08": "createNewChain", + "796b89b9": "getBlockTimestamp", + "9cd45184": "chainBalance", + "363bf964": "setAddresses", + "8e8acf87": "getL2BlockNumberAndTimestamp", + "191553a4": "getRecordedLogs", + "7b510fe8": "getAccountInfo", + "9caf9bac": "setX", + "fe74f05b": 
"assertEq", + "e00ad03e": "replace", + "97624631": "assertEq", + "6e9d7899": "legacyBridge", + "5e97348f": "envOr", + "7ba04809": "assertFalse", + "fc57565f": "upgradeChainFromVersion", + "3635f3e6": "resetTxNumberInBatch", + "522074ab": "parseJsonUintArray", + "97c09d34": "revertBatches", + "c87325f1": "finalizeWithdrawal", + "64af255d": "isContext", + "5af231c1": "envBytes32", + "b96213e4": "mockCall", + "f877cb19": "envString", + "6900a3ae": "toString", + "e52db4ca": "baseTokenAssetId", + "60429eb2": "assertApproxEqAbsDecimal", + "1dcd1f68": "assertNotEq", + "73c58a2d": "publishBlobs", + "7f61885c": "proveBatches", + "7ecebe00": "nonces", + "26e4ae25": "initialize", + "bb7044b6": "stateTransitionManagerIsRegistered", + "91b19874": "validators", + "c1adbbff": "expectCall", + "06bed036": "setL2Block", + "02fa5779": "setNewBatch", + "0f29772b": "rollFork", + "4f1e1be0": "storeAccountConstructingCodeHash", + "7c9bd1f3": "publishTimestampDataToL1", + "a3912ec8": "receiveEther", "81d100a3": "scheduleTransparent", - "85e4e16a": "assetDeploymentTracker", - "204e1c7a": "getProxyImplementation", - "d566afd3": "createBatchCommitment", - "70f5c679": "setMessageRoot", - "07168226": "deployBeaconProxy", - "7b574586": "publishedBlobCommitments", - "fcc73360": "updateLeaf", - "631f4bac": "getPriorityQueueSize", - "3e64a696": "getBasefee", - "facd743b": "isValidator", - "7fb67816": "setValidatorTimelock", - "ee82ac5e": "getBlockHash", - "6e9960c3": "getAdmin", - "98acd7a6": "getBaseToken", - "06e7517b": "appendTransactionToCurrentL2Block", - "b993549e": "getCommittedBatchTimestamp", - "23dc4a09": "keccakPerformUpgrade", - "cf347e17": "setValidator", - "3408e470": "getChainId", - "ae1f6aaf": "l2Bridge", - "c2e90293": "bridgeRecoverFailedTransfer", - "86b7f856": "publishPubdataAndClearState", - "b292f5f1": "proveL1ToL2TransactionStatus", - "7a592065": "calculateRoot", - "a5277a02": "initialize", - "ef939455": "keccakUpgradeTest", - "3644e515": "DOMAIN_SEPARATOR", - "306395c6": "incrementDeploymentNonce", + "2ae9c600": "protocolVersion", + "ed1d7d97": "chainIndexToId", + "c4bc59e0": "readDir", "b277f199": "uncheckedAdd", - "6fadcf72": "forward", - "ae65def1": "node", - "e0bf0850": "setShouldRevertOnProveBatches", - "a457c2d7": "decreaseAllowance", - "9f3f89dc": "getZero", - "4dd18bf5": "setPendingAdmin", - "33ce93fe": "getProtocolVersion", - "c87325f1": "finalizeWithdrawal", - "40a434d5": "transferTokenToNTV", - "e9420f8c": "whitelistedSettlementLayers", - "3f704d2a": "setAssetHandlerAddress", - "ede25608": "protocolVersionToUpgradeTimestamp", - "042901c7": "proveL1ToL2TransactionStatus", + "a5982885": "assertFalse", + "98680034": "createSelectFork", + "aa5cf90e": "stopAndReturnStateDiff", + "3b925549": "prevrandao", + "fcc73360": "updateLeaf", "cab7e8eb": "isNonceUsed", - "5aa6fa1f": "NATIVE_TOKEN_VAULT", - "b8776d4d": "chainRegistered", - "8fbb3711": "claimFailedDepositLegacyErc20Bridge", - "8dd14802": "setBridge", + "7404f1d2": "createWallet", + "7321c485": "dummySetValidator", + "501e60d5": "setUpgradeDiamondCut", + "fa9d8713": "sleep", + "310ab089": "getImmutable", + "2e1a7d4d": "withdraw", + "2986c0e5": "index", + "2f745c59": "tokenOfOwnerByIndex", + "52d1902d": "proxiableUUID", + "898e83fc": "assertNotEq", + "4f6ccce7": "tokenByIndex", + "8e214810": "parseTomlBytes32", + "15f9a2fe": "prepareForPaymaster", + "91f3b94f": "parseJsonBoolArray", + "be65940a": "setEraPostLegacyBridgeUpgradeFirstBatch", + "40a434d5": "transferTokenToNTV", + "ef011dff": "ERA_CHAIN_ID", + "c2aaf9c4": "receiveEth", + 
"84b0196e": "eip712Domain", + "074ae3d7": "toUppercase", + "313ce567": "decimals", + "a9f6d941": "executeUpgrade", + "d0cbbdef": "assertEqDecimal", + "4049ddd2": "chainId", + "8466d8d1": "getBridgeHubAddress", + "bcd1b23d": "updateFullTree", + "fccc11c4": "assertApproxEqRelDecimal", + "f34d1868": "setExecutionDelay", + "892a0c61": "envInt", + "41af2f52": "recordLogs", + "b4a85892": "envOr", + "ad7e232e": "setImmutables", + "74f4f547": "bridgeBurn", + "f5407abe": "setValues", + "b1fde1a8": "sharedTree", + "a972d037": "assertLtDecimal", + "a225efcb": "setPubdataInfo", + "b0f40a17": "processBatch", + "d1ba7e97": "hyperchainAddress", + "4dfe692c": "assertLe", + "7fb67816": "setValidatorTimelock", + "36f656d8": "assertEq", + "890c283b": "computeCreate2Address", + "83211b40": "signP256", + "98461504": "setUpgradeDiamondCut", + "18876a04": "chunkPubdataToBlobs", + "f30c7ba3": "expectCall", + "eb672419": "requestL2Transaction", + "7069d0c0": "executeInstant", + "6229498b": "deriveKey", + "1ecb7d33": "assertApproxEqRel", + "bf1fe420": "setGasPrice", + "72c84445": "callKeccak", + "1c72346d": "resetNonce", + "4cd88b76": "initialize", + "8c374c65": "ensNamehash", + "b12e1694": "assertNotEq", + "e5fb9b4a": "assertEq", + "bf54096e": "MAX_NUMBER_OF_HYPERCHAINS", + "24a55db9": "markBytecodeAsPublished", + "fa91454d": "parseUint", + "62ee05f4": "promptAddress", "b3160bad": "executeBatchesSharedBridge", + "08284e57": "upgrade", + "48ceb85e": "chainIndex", + "70f55728": "readLine", + "e4441b98": "initialize", + "65d5c135": "assertLt", + "8bff9133": "assertGeDecimal", + "191f1b30": "assertEq", + "ff483c54": "coinbase", + "3f704d2a": "setAssetHandlerAddress", + "c0991525": "claimFailedDeposit", + "86b9620d": "expectEmit", + "9cc7f708": "balanceOf", + "56f29cba": "assertNotEq", + "eb39e6d5": "stateTransitionManager", + "9cc395d0": "bridgeCheckCounterpartAddress", + "51ac6a33": "writeToml", + "0f28c97d": "getCurrentBlockTimestamp", + "99a88ec4": "upgrade", + "beda594a": "setHyperchain", + "60f78733": "chainSetTokenMultiplierSetter", + "9e8945d2": "verificationKeyHash", + "61f91b2e": "initialForceDeploymentHash", + "fa8f7ea6": "getAllHyperchains", + "205c2878": "withdrawTo", + "36ba0355": "bridgeMint", + "4c63e562": "assume", + "62f84b24": "sendToL1", + "1f6d6ef7": "getBlobBaseFee", + "b5b18fe5": "processL2Logs", + "9e6ea417": "depositLegacyErc20Bridge", + "c8bd0e4a": "toBase64URL", + "2281f367": "envOr", + "75fe6a99": "pushBack", + "ecf95b8a": "createAccount", + "ebf0c717": "root", + "81409b91": "mockCall", + "715018a6": "renounceOwnership", + "56142d7a": "priorityQueueFrontOperation", "f5c1182c": "getSemverProtocolVersion", + "13bc9f20": "isOperationReady", + "a9b0d128": "setPriorityTreeStartIndex", + "95ce3e93": "decodeString", + "042901c7": "proveL1ToL2TransactionStatus", + "f7fe3477": "assertEq", + "98296c54": "assertEq", + "ac22e971": "serializeBool", + "92925aa1": "serializeBool", + "3868ac34": "assertEq", + "74044673": "addStateTransitionManager", + "1e19e657": "parseJsonAddress", + "9b3358b0": "serializeJson", + "671a7131": "settlementLayer", + "ddeaa8e6": "getBatchHash", + "7da01cd6": "executeUpgrade", + "0c9fd581": "assertTrue", + "39d7d4aa": "getPriorityTreeRoot", + "53b9e632": "assetHandlerAddress", + "9884b232": "serializeBytes", + "8d1cc925": "getCode", + "23361207": "expectCall", + "77421056": "setFunctionToCall", + "af368a08": "fsMetadata", + "689992b3": "undoL1ToL2Alias", + "5a590335": "getDAValidatorPair", + "19fa7f62": "claimFailedDeposit", + "2a79c611": "getCommitment", + "202bcce7": 
"validateTransaction", + "155fd27a": "setValueUnderNonce", + "bb0fd610": "extendedAccountVersion", + "46d0b252": "assertNotEq", + "f5f15168": "l2TokenAddress", + "fb644fc5": "addChainBatchRoot", + "3a9d7f8d": "stmDeployer", "8b257989": "executionDelay", - "588570a5": "initialize", + "3e914080": "assertLt", + "c2eeeebd": "l1Address", + "71ee464d": "createSelectFork", + "a34edc03": "assertTrue", + "8bb8dd43": "parseTomlString", + "236e4d66": "assertNotEq", + "64e130cf": "nativeTokenVault", + "aad74262": "setProtocolVersionDeadline", + "9711715a": "snapshot", + "64949a8d": "assertGtDecimal", + "4d4a1eca": "setTokenMultiplier", + "79ba5097": "acceptOwnership", + "e2a9d554": "setUpgradeTimestamp", + "27eb6c0f": "securityCouncil", + "45b56078": "startPrank", + "652fd489": "promptUint", + "4623c91d": "setValidator", + "82b57749": "forwardedBridgeMint", + "f1afe04d": "removeFile", + "8ffe1b81": "setBridgeHubAddress", + "accdd16c": "freezeChain", + "12f43dab": "bridgehubRequestL2Transaction", + "c9f5c932": "requestL2TransactionTwoBridges", + "98f9bdbd": "assertNotEq", + "42346c5e": "parseInt", + "9b67b21c": "setNonceUnsafe", + "9f629281": "parseTomlStringArray", + "3659cfe6": "upgradeTo", + "631f4bac": "getPriorityQueueSize", + "bce38bd7": "tryAggregate", + "fea2d14f": "assertApproxEqRel", + "2d812b44": "serializeBytes32", + "94ca304b": "numNodes", + "799cd333": "sign", + "c2e90293": "bridgeRecoverFailedTransfer", + "69340beb": "multicall", + "70a08231": "balanceOf", "4cd40a02": "setLegacyTokenAssetId", - "d124dc4f": "send", - "23b872dd": "transferFrom", - "086a56f8": "getBaseTokenBridge", - "689992b3": "undoL1ToL2Alias", - "03c5d8af": "forwardTransactionOnGateway", - "48ceb85e": "chainIndex", - "ba334825": "hyperchain", - "b1fde1a8": "sharedTree", - "7069d0c0": "executeInstant", - "c2aaf9c4": "receiveEth", - "2986c0e5": "index", - "b5872958": "timestamps", - "c2e4ff97": "markAccountCodeHashAsConstructed", - "9c4d535b": "create", - "923b3b56": "forceDeployOnAddress", - "3635f3e6": "resetTxNumberInBatch", - "19698bc9": "infiniteFunction", - "315fff4e": "THIS_ADDRESS", - "52c9eacb": "upgradeCutHash", - "18e3a941": "getVerifierParams", - "29f172ad": "unsafeOverrideBatch", - "4b561753": "addValidator", - "a9059cbb": "transfer", - "949431dc": "approvalBased", + "4bed8212": "isWithdrawalFinalized", + "84da1fb4": "getNewAddressCreate2", + "ffa18649": "addr", + "9f3f89dc": "getZero", + "081812fc": "getApproved", + "6ef25c3a": "baseFee", + "29092d0e": "remove", + "201e43e2": "serializeBytes32", + "74318528": "envOr", + "ec8067c7": "updateNonceOrdering", + "841a9d42": "aggregate3Value", + "b5df27c8": "parseTomlUintArray", + "f1d357e5": "L1_SHARED_BRIDGE", + "1de72e34": "baseTokenGasPriceMultiplierDenominator", + "a5cbfe65": "toBase64", + "528a683c": "keyExists", + "71abd109": "upgrade", + "381c3f13": "checkDA", + "0ef26743": "height", + "7fefbbe0": "assertLeDecimal", + "ef939455": "keccakUpgradeTest", + "1806aa18": "getCodeSize", + "f4943a20": "protocolVersionDeadline", + "de8fa431": "getSize", + "a851ae78": "setTxOrigin", + "f45c1ce7": "tryFfi", + "39b37ab0": "fee", + "261a323e": "exists", + "7cb9357e": "gasPerPubdataByte", "8f283970": "changeAdmin", - "85df51fd": "blockHash", - "dead6f7f": "getHyperchain", - "896909dc": "getMinNonce", - "7eff275e": "changeProxyAdmin", - "27ae4c16": "freezeDiamond", - "566338a9": "getL1TokenAddress", + "266cf109": "record", + "7efda2ae": "proveL2LeafInclusion", + "246a61de": "ERA_DIAMOND_PROXY", + "b4d6c782": "etch", + "c05afaa6": "initializeDevBridge", + "52ef6b2c": 
"facetAddresses", + "8c1aa205": "sign", + "129e9002": "serializeUint", + "d52471c1": "requestL2TransactionDirect", + "c2e047ff": "aggregate3", + "e48a8f8d": "assertEq", + "027f12e1": "changeFeeParams", + "095ea7b3": "approve", + "8f31f052": "isWithdrawalFinalized", + "7a592065": "calculateRoot", + "1c5a9d9c": "activate", + "dd354a06": "calculateCreate2TokenAddress", + "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber", + "fb1a9a57": "getDeploymentNonce", + "6d1d8363": "scheduleShadow", + "42cbb15c": "getBlockNumber", + "18160ddd": "totalSupply", + "c1350739": "parseTomlInt", + "859216bc": "envOr", + "d92f86a2": "setLegacyChainAddress", + "f280efbe": "initializeChainGovernance", + "6e9960c3": "getAdmin", + "af6a2dcd": "getTotalBlocksVerified", + "50bb0884": "toLowercase", + "9623609d": "upgradeAndCall", "8ac84c0e": "txNumberInBlock", - "53ce2061": "revertBatches", - "9a8a0592": "chainId", - "f5407abe": "setValues", - "46657fe9": "getVerifier", - "484f0505": "getHyperchainLegacy", - "b760faf9": "depositTo", - "5de097b1": "nullifyChainBalanceByNTV", - "e8295588": "zeros", - "f90eb963": "getPorterAvailability", - "57180981": "updateAccountVersion", - "579952fc": "transferFromTo", - "d505accf": "permit", - "e02da327": "readUint256", - "51d218f7": "unfreezeChain", - "8466d8d1": "getBridgeHubAddress", - "b381724e": "setFeeParams", - "d9caed12": "withdraw", - "9d1b5a81": "getL2SystemContractsUpgradeBlockNumber" + "9fa8826b": "depositHappened", + "b5872958": "timestamps", + "49eb3b50": "getTransactionHashes", + "6c0960f9": "finalizeEthWithdrawal", + "c1978d1f": "envUint", + "c4879440": "bridgehubDepositBaseToken", + "0603ea68": "assertNotEq", + "c924de35": "transferEthToSharedBridge", + "f8c1f0d2": "upgradeChainFromVersion", + "7d15d019": "isDir", + "ea060291": "allowCheatcodes", + "82ad56cb": "aggregate3", + "805b9869": "executeTransactionFromOutside", + "1624f6c6": "initialize", + "daa51a8c": "pushBack", + "286fafea": "assertNotEq", + "5df93c9b": "assertGeDecimal", + "974ef924": "parseBool" } \ No newline at end of file From 73c0b7c5d7f8f156657fd1c9ed502cc2fff7e063 Mon Sep 17 00:00:00 2001 From: Manuel Mauro Date: Mon, 16 Sep 2024 16:30:14 +0200 Subject: [PATCH 080/116] feat(zk_toolbox): add `zki ecosystem build` subcommand (#2787) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add `zki ecosystem build` subcommand, which builds L1 transactions without signing and broadcasting them. This allows a security team to sign them using keys in cold storage and multisig. 
```bash
Create transactions to build ecosystem contracts

Usage: zki ecosystem build-transactions [OPTIONS]

Options:
      --sender <SENDER>          Address of the transaction sender
      --l1-rpc-url <L1_RPC_URL>  L1 RPC URL
  -o, --out <OUT>                Output directory for the generated files
  -h, --help                     Print help (see a summary with '-h')
```

```bash
Create unsigned transactions for chain deployment

Usage: zki chain build-transactions [OPTIONS]

Options:
  -o, --out <OUT>  Output directory for the generated files
  -h, --help       Print help (see a summary with '-h')
```

### Output

```json
# /transactions/deploy.json
{
  "transactions": [
    {
      "hash": null,
      "transactionType": "CREATE",
      "contractName": null,
      "contractAddress": "0xddca24376aa96d8f56667d78306a3bbbdc65b1ff",
      "function": null,
      "arguments": null,
      "transaction": {
        "from": "0xaf9d732e8a5607caccb72df525851849c33edf9e",
        "gas": "0x15a02",
        "value": "0x0",
        "input": "0x604580600e600039806000f350fe7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3",
        "nonce": "0x0",
        "chainId": "0x9"
      },
      "additionalContracts": [],
      "isFixedGasLimit": false
    },
    {
      "hash": null,
      "transactionType": "CALL",
      "contractName": null,
      "contractAddress": "0xddca24376aa96d8f56667d78306a3bbbdc65b1ff",
      "function": null,
      "arguments": null,
      "transaction": {
        "from": "0xaf9d732e8a5607caccb72df525851849c33edf9e",
        "to": "0xddca24376aa96d8f56667d78306a3bbbdc65b1ff",
        "gas": "0xfb603",
        "value": "0x0",
        "input": ...
```

## Why ❔

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
---
 .github/workflows/ci-zk-toolbox-reusable.yml  |  38 ++++-
 .gitignore                                    |   1 +
 zk_toolbox/Cargo.lock                         |   2 +
 zk_toolbox/Cargo.toml                         |   2 +-
 zk_toolbox/crates/common/src/forge.rs         |  11 ++
 .../forge_interface/register_chain/input.rs   |   3 +-
 .../commands/chain/args/build_transactions.rs |  60 ++++++++
 .../src/commands/chain/args/mod.rs            |   1 +
 .../src/commands/chain/build_transactions.rs  |  90 ++++++++++++
 .../zk_inception/src/commands/chain/common.rs | 125 +++++++++++++++++
 .../src/commands/chain/deploy_paymaster.rs    |  32 +++--
 .../zk_inception/src/commands/chain/init.rs   | 131 ++---------------
 .../zk_inception/src/commands/chain/mod.rs    |   8 +-
 .../ecosystem/args/build_transactions.rs      |  68 +++++++++
 .../src/commands/ecosystem/args/mod.rs        |   1 +
 .../commands/ecosystem/build_transactions.rs  |  79 +++++++++++
 .../src/commands/ecosystem/common.rs          |  75 ++++++++++
 .../src/commands/ecosystem/init.rs            |  85 +++--------
 .../src/commands/ecosystem/mod.rs             |   7 +
 .../src/commands/ecosystem/utils.rs           |  14 ++
 zk_toolbox/crates/zk_inception/src/consts.rs  |   1 +
 .../crates/zk_inception/src/messages.rs       |  18 +++
 zk_toolbox/crates/zk_supervisor/Cargo.toml    |   2 +
 .../crates/zk_supervisor/src/commands/mod.rs  |   1 +
 .../commands/send_transactions/args/mod.rs    |  69 +++++++++
 .../src/commands/send_transactions/mod.rs     | 132 ++++++++++++++++++
 zk_toolbox/crates/zk_supervisor/src/consts.rs |   1 +
 .../crates/zk_supervisor/src/defaults.rs      |   1 +
 zk_toolbox/crates/zk_supervisor/src/main.rs   |  13 +-
 .../crates/zk_supervisor/src/messages.rs      |  16 +++
 30 files changed, 881 insertions(+), 206 deletions(-)
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs
 create mode 100644 zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs
 create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs
 create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs
 create mode 100644 zk_toolbox/crates/zk_supervisor/src/consts.rs

diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml
index 638f168de309..98a44c443581 100644
--- a/.github/workflows/ci-zk-toolbox-reusable.yml
+++ b/.github/workflows/ci-zk-toolbox-reusable.yml
@@ -23,7 +23,6 @@ jobs:
           submodules: "recursive"
           fetch-depth: 0
 
-
       - name: Setup environment
         run: |
           echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
@@ -79,6 +78,41 @@ jobs:
           --ignore-prerequisites --verbose \
           --observability=false
 
+      - name: Create and register chain with transactions signed "offline"
+        run: |
+          ci_run zk_inception chain create \
+          --chain-name offline_chain \
+          --chain-id sequential \
+          --prover-mode no-proofs \
+          --wallet-creation localhost \
+          --l1-batch-commit-data-generator-mode rollup \
+          --base-token-address 0x0000000000000000000000000000000000000001 \
+          --base-token-price-nominator 1 \
+          --base-token-price-denominator 1 \
+          --set-as-default false \
+          --ignore-prerequisites
+
+          ci_run zk_inception chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545
+
+          governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml)
+
+          ci_run zk_supervisor send-transactions \
+          --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \
+          --l1-rpc-url http://127.0.0.1:8545 \
+          --private-key $governor_pk
+
+          bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml)
+          chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml)
+
+          hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id)
+
+          if [[ $hyperchain_output == 0x* && ${#hyperchain_output} -eq 66 ]]; then
+              echo "Chain successfully registered: $hyperchain_output"
+          else
+              echo "Failed to register chain: $hyperchain_output"
+              exit 1
+          fi
+
       - name: Read Custom Token address and set as environment variable
         run: |
           address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml)
@@ -297,7 +331,6 @@ jobs:
           wait $PID3
           wait $PID4
 
-
       # Upgrade tests should run last, because as soon as they
       # finish the bootloader will be different
       # TODO make upgrade tests safe to run multiple times
@@ -305,7 +338,6 @@ jobs:
         run: |
           ci_run zk_supervisor test upgrade --no-deps --chain era
 
-
       - name: Upload logs
         uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
         if: always()
diff --git a/.gitignore b/.gitignore
index 725b5940afeb..c3de7a2df84d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -117,3 +117,4 @@ chains/era/configs/*
 configs/*
 era-observability/
 core/tests/ts-integration/deployments-zk
+transactions/
diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock
index 91c379d6443e..02da0311991a 100644
--- a/zk_toolbox/Cargo.lock
+++ b/zk_toolbox/Cargo.lock
@@ -6442,6 +6442,7 @@ name = "zk_supervisor"
 version = "0.1.0"
"0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", "common", @@ -6457,6 +6458,7 @@ dependencies = [ "types", "url", "xshell", + "zksync_basic_types", ] [[package]] diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index 875db3794d41..d8b84f93adde 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -39,10 +39,10 @@ zksync_protobuf = "=0.1.1" # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } -chrono = "0.4" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" +chrono = "0.4.38" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" diff --git a/zk_toolbox/crates/common/src/forge.rs b/zk_toolbox/crates/common/src/forge.rs index f00921a0bf20..7fd5399cc66b 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zk_toolbox/crates/common/src/forge.rs @@ -96,6 +96,12 @@ impl ForgeScript { self } + /// Add the sender address to the forge script command. + pub fn with_sender(mut self, address: String) -> Self { + self.args.add_arg(ForgeScriptArg::Sender { address }); + self + } + /// Add the rpc-url flag to the forge script command. pub fn with_rpc_url(mut self, rpc_url: String) -> Self { self.args.add_arg(ForgeScriptArg::RpcUrl { url: rpc_url }); @@ -135,6 +141,7 @@ impl ForgeScript { }); self } + // Do not start the script if balance is not enough pub fn private_key(&self) -> Option { self.args.args.iter().find_map(|a| { @@ -244,6 +251,10 @@ pub enum ForgeScriptArg { }, Verify, Resume, + #[strum(to_string = "sender={address}")] + Sender { + address: String, + }, } /// ForgeScriptArgs is a set of arguments that can be passed to the forge script command. diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs index 29494ba5d8f5..e2e60294e867 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs +++ b/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs @@ -54,7 +54,6 @@ impl ZkToolboxConfig for RegisterChainL1Config {} impl RegisterChainL1Config { pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result { - let genesis_config = chain_config.get_genesis_config()?; let wallets_config = chain_config.get_wallets_config()?; Ok(Self { contracts_config: Contracts { @@ -72,7 +71,7 @@ impl RegisterChainL1Config { validator_timelock_addr: contracts.ecosystem_contracts.validator_timelock_addr, }, chain: ChainL1Config { - chain_chain_id: genesis_config.l2_chain_id, + chain_chain_id: chain_config.chain_id, base_token_gas_price_multiplier_nominator: chain_config.base_token.nominator, base_token_gas_price_multiplier_denominator: chain_config.base_token.denominator, base_token_addr: chain_config.base_token.address, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs new file mode 100644 index 000000000000..793bea487f7e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs @@ -0,0 +1,60 @@ +use std::path::PathBuf; + +use clap::Parser; +use common::{config::global_config, forge::ForgeScriptArgs, Prompt}; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{ + consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, + defaults::LOCAL_RPC_URL, + messages::{MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT}, +}; + +const CHAIN_SUBDIR: &str = "chain"; + 
+#[derive(Debug, Clone, Serialize, Deserialize, Parser)] +pub struct BuildTransactionsArgs { + /// Output directory for the generated files. + #[arg(long, short)] + pub out: Option, + /// All ethereum environment related arguments + #[clap(flatten)] + #[serde(flatten)] + pub forge_args: ForgeScriptArgs, + #[clap(long, help = MSG_L1_RPC_URL_HELP)] + pub l1_rpc_url: Option, +} + +impl BuildTransactionsArgs { + pub fn fill_values_with_prompt(self, default_chain: String) -> BuildTransactionsArgsFinal { + let chain_name = global_config().chain_name.clone(); + + let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { + Prompt::new(MSG_L1_RPC_URL_PROMPT) + .default(LOCAL_RPC_URL) + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }); + + BuildTransactionsArgsFinal { + out: self + .out + .unwrap_or(PathBuf::from(DEFAULT_UNSIGNED_TRANSACTIONS_DIR).join(CHAIN_SUBDIR)) + .join(chain_name.unwrap_or(default_chain)), + forge_args: self.forge_args, + l1_rpc_url, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct BuildTransactionsArgsFinal { + pub out: PathBuf, + pub forge_args: ForgeScriptArgs, + pub l1_rpc_url: String, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs index 08f39a90a843..f2a5f6b8be1f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs @@ -1,3 +1,4 @@ +pub mod build_transactions; pub mod create; pub mod genesis; pub mod init; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs new file mode 100644 index 000000000000..68cb7a9a0742 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs @@ -0,0 +1,90 @@ +use anyhow::Context; +use common::{config::global_config, git, logger, spinner::Spinner}; +use config::{ + copy_configs, traits::SaveConfigWithBasePath, update_from_chain_config, EcosystemConfig, +}; +use ethers::utils::hex::ToHex; +use xshell::Shell; + +use super::common::register_chain; +use crate::{ + commands::chain::args::build_transactions::BuildTransactionsArgs, + messages::{ + MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR, + MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG, + MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR, MSG_PREPARING_CONFIG_SPINNER, MSG_SELECTED_CONFIG, + MSG_WRITING_OUTPUT_FILES_SPINNER, + }, +}; + +const REGISTER_CHAIN_TXNS_FILE_SRC: &str = + "contracts/l1-contracts/broadcast/RegisterHyperchain.s.sol/9/dry-run/run-latest.json"; +const REGISTER_CHAIN_TXNS_FILE_DST: &str = "register-hyperchain-txns.json"; + +const SCRIPT_CONFIG_FILE_SRC: &str = + "contracts/l1-contracts/script-config/register-hyperchain.toml"; +const SCRIPT_CONFIG_FILE_DST: &str = "register-hyperchain.toml"; + +pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::Result<()> { + let config = EcosystemConfig::from_file(shell)?; + let chain_name = global_config().chain_name.clone(); + let chain_config = config + .load_chain(chain_name) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + + let args = args.fill_values_with_prompt(config.default_chain.clone()); + + git::submodule_update(shell, config.link_to_code.clone())?; + + let spinner = Spinner::new(MSG_PREPARING_CONFIG_SPINNER); + copy_configs(shell, 
&config.link_to_code, &chain_config.configs)?; + + logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); + + let mut genesis_config = chain_config.get_genesis_config()?; + update_from_chain_config(&mut genesis_config, &chain_config); + + // Copy ecosystem contracts + let mut contracts_config = config + .get_contracts_config() + .context(MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG)?; + contracts_config.l1.base_token_addr = chain_config.base_token.address; + spinner.finish(); + + let spinner = Spinner::new(MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER); + let governor: String = config.get_wallets()?.governor.address.encode_hex_upper(); + + register_chain( + shell, + args.forge_args.clone(), + &config, + &chain_config, + &mut contracts_config, + args.l1_rpc_url.clone(), + Some(governor), + false, + ) + .await?; + + contracts_config.save_with_base_path(shell, &args.out)?; + spinner.finish(); + + let spinner = Spinner::new(MSG_WRITING_OUTPUT_FILES_SPINNER); + shell + .create_dir(&args.out) + .context(MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR)?; + + shell.copy_file( + config.link_to_code.join(REGISTER_CHAIN_TXNS_FILE_SRC), + args.out.join(REGISTER_CHAIN_TXNS_FILE_DST), + )?; + + shell.copy_file( + config.link_to_code.join(SCRIPT_CONFIG_FILE_SRC), + args.out.join(SCRIPT_CONFIG_FILE_DST), + )?; + spinner.finish(); + + logger::success(MSG_CHAIN_TRANSACTIONS_BUILT); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs new file mode 100644 index 000000000000..ec70d6122d23 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs @@ -0,0 +1,125 @@ +use common::{ + forge::{Forge, ForgeScriptArgs}, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, + script_params::REGISTER_CHAIN_SCRIPT_PARAMS, + }, + traits::{ReadConfig, SaveConfig}, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use types::{BaseToken, L1Network, WalletCreation}; +use xshell::Shell; + +use crate::{ + consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, + messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER}, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +#[allow(clippy::too_many_arguments)] +pub async fn register_chain( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + contracts: &mut ContractsConfig, + l1_rpc_url: String, + sender: Option, + broadcast: bool, +) -> anyhow::Result<()> { + let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); + + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_foundry()) + .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url); + + if broadcast { + forge = forge.with_broadcast(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let register_chain_output = RegisterChainOutput::read( + shell, + REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + contracts.set_chain_contracts(®ister_chain_output); + Ok(()) +} + +// Distribute eth to the chain wallets for localhost environment +pub async fn 
distribute_eth( + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + if chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + { + let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let mut addresses = vec![ + chain_wallets.operator.address, + chain_wallets.blob_operator.address, + chain_wallets.governor.address, + ]; + if let Some(deployer) = chain_wallets.deployer { + addresses.push(deployer.address) + } + common::ethereum::distribute_eth( + wallets.operator, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, + ) + .await?; + spinner.finish(); + } + Ok(()) +} + +pub async fn mint_base_token( + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, + l1_rpc_url: String, +) -> anyhow::Result<()> { + if chain_config.wallet_creation == WalletCreation::Localhost + && ecosystem_config.l1_network == L1Network::Localhost + && chain_config.base_token != BaseToken::eth() + { + let spinner = Spinner::new(MSG_MINT_BASE_TOKEN_SPINNER); + let wallets = ecosystem_config.get_wallets()?; + let chain_wallets = chain_config.get_wallets_config()?; + let base_token = &chain_config.base_token; + let addresses = vec![wallets.governor.address, chain_wallets.governor.address]; + let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128 + / base_token.denominator as u128; + common::ethereum::mint_token( + wallets.operator, + base_token.address, + addresses, + l1_rpc_url, + ecosystem_config.l1_network.chain_id(), + amount, + ) + .await?; + spinner.finish(); + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs index 81ac457cd884..58c199189bd7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs @@ -2,7 +2,6 @@ use anyhow::Context; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, - spinner::Spinner, }; use config::{ forge_interface::{ @@ -15,9 +14,7 @@ use config::{ use xshell::Shell; use crate::{ - messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_DEPLOYING_PAYMASTER, MSG_L1_SECRETS_MUST_BE_PRESENTED, - }, + messages::{MSG_CHAIN_NOT_INITIALIZED, MSG_L1_SECRETS_MUST_BE_PRESENTED}, utils::forge::{check_the_balance, fill_forge_private_key}, }; @@ -28,7 +25,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .load_chain(chain_name) .context(MSG_CHAIN_NOT_INITIALIZED)?; let mut contracts = chain_config.get_contracts_config()?; - deploy_paymaster(shell, &chain_config, &mut contracts, args).await?; + deploy_paymaster(shell, &chain_config, &mut contracts, args, None, true).await?; contracts.save_with_base_path(shell, chain_config.configs) } @@ -37,6 +34,8 @@ pub async fn deploy_paymaster( chain_config: &ChainConfig, contracts_config: &mut ContractsConfig, forge_args: ForgeScriptArgs, + sender: Option, + broadcast: bool, ) -> anyhow::Result<()> { let input = DeployPaymasterInput::new(chain_config)?; let foundry_contracts_path = chain_config.path_to_foundry(); @@ -56,18 +55,23 @@ pub async fn deploy_paymaster( .l1_rpc_url .expose_str() .to_string(), - ) - .with_broadcast(); + ); - forge = fill_forge_private_key( - forge, - 
-        chain_config.get_wallets_config()?.governor_private_key(),
-    )?;
+    if let Some(address) = sender {
+        forge = forge.with_sender(address);
+    } else {
+        forge = fill_forge_private_key(
+            forge,
+            chain_config.get_wallets_config()?.governor_private_key(),
+        )?;
+    }
+
+    if broadcast {
+        forge = forge.with_broadcast();
+        check_the_balance(&forge).await?;
+    }
 
-    let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER);
-    check_the_balance(&forge).await?;
     forge.run(shell)?;
-    spinner.finish();
 
     let output = DeployPaymasterOutput::read(
         shell,
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
index 734e5e54863b..fa2388a69be8 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs
@@ -1,24 +1,13 @@
 use anyhow::{bail, Context};
-use common::{
-    config::global_config,
-    forge::{Forge, ForgeScriptArgs},
-    git, logger,
-    spinner::Spinner,
-};
+use common::{config::global_config, git, logger, spinner::Spinner};
 use config::{
-    copy_configs,
-    forge_interface::{
-        register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput},
-        script_params::REGISTER_CHAIN_SCRIPT_PARAMS,
-    },
-    ports_config, set_l1_rpc_url,
-    traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath},
-    update_from_chain_config, update_ports, ChainConfig, ContractsConfig, EcosystemConfig,
-    GeneralConfig,
+    copy_configs, ports_config, set_l1_rpc_url, traits::SaveConfigWithBasePath,
+    update_from_chain_config, update_ports, ChainConfig, EcosystemConfig, GeneralConfig,
 };
-use types::{BaseToken, L1Network, WalletCreation};
+use types::BaseToken;
 use xshell::Shell;
 
+use super::common::{distribute_eth, mint_base_token, register_chain};
 use crate::{
     accept_ownership::accept_admin,
     commands::{
@@ -31,18 +20,14 @@ use crate::{
         },
         portal::update_portal_config,
     },
-    consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
     messages::{
         msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED,
-        MSG_CHAIN_NOT_FOUND_ERR, MSG_DISTRIBUTING_ETH_SPINNER, MSG_GENESIS_DATABASE_ERR,
-        MSG_MINT_BASE_TOKEN_SPINNER, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR,
+        MSG_CHAIN_NOT_FOUND_ERR, MSG_DEPLOYING_PAYMASTER, MSG_GENESIS_DATABASE_ERR,
+        MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR,
         MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG,
         MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND,
     },
-    utils::{
-        consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets},
-        forge::{check_the_balance, fill_forge_private_key},
-    },
+    utils::consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets},
 };
 
 pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> {
@@ -106,6 +91,8 @@ pub async fn init(
         chain_config,
         &mut contracts_config,
         init_args.l1_rpc_url.clone(),
+        None,
+        true,
     )
     .await?;
     contracts_config.save_with_base_path(shell, &chain_config.configs)?;
@@ -165,14 +152,18 @@ pub async fn init(
     }
 
     if init_args.deploy_paymaster {
+        let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER);
         deploy_paymaster::deploy_paymaster(
             shell,
             chain_config,
             &mut contracts_config,
             init_args.forge_args.clone(),
+            None,
+            true,
         )
         .await?;
         contracts_config.save_with_base_path(shell, &chain_config.configs)?;
+        spinner.finish();
     }
 
     genesis(init_args.genesis_args.clone(), shell, chain_config)
@@ -186,100 +177,6 @@ pub async fn init(
     Ok(())
 }
 
-async fn register_chain(
-    shell: &Shell,
-    forge_args: ForgeScriptArgs,
-    config: &EcosystemConfig,
-    chain_config: &ChainConfig,
-    contracts: &mut ContractsConfig,
-    l1_rpc_url: String,
-) -> anyhow::Result<()> {
-    let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code);
-
-    let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?;
-    deploy_config.save(shell, deploy_config_path)?;
-
-    let mut forge = Forge::new(&config.path_to_foundry())
-        .script(&REGISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone())
-        .with_ffi()
-        .with_rpc_url(l1_rpc_url)
-        .with_broadcast();
-
-    forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?;
-    check_the_balance(&forge).await?;
-    forge.run(shell)?;
-
-    let register_chain_output = RegisterChainOutput::read(
-        shell,
-        REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-    contracts.set_chain_contracts(&register_chain_output);
-    Ok(())
-}
-
-// Distribute eth to the chain wallets for localhost environment
-pub async fn distribute_eth(
-    ecosystem_config: &EcosystemConfig,
-    chain_config: &ChainConfig,
-    l1_rpc_url: String,
-) -> anyhow::Result<()> {
-    if chain_config.wallet_creation == WalletCreation::Localhost
-        && ecosystem_config.l1_network == L1Network::Localhost
-    {
-        let spinner = Spinner::new(MSG_DISTRIBUTING_ETH_SPINNER);
-        let wallets = ecosystem_config.get_wallets()?;
-        let chain_wallets = chain_config.get_wallets_config()?;
-        let mut addresses = vec![
-            chain_wallets.operator.address,
-            chain_wallets.blob_operator.address,
-            chain_wallets.governor.address,
-        ];
-        if let Some(deployer) = chain_wallets.deployer {
-            addresses.push(deployer.address)
-        }
-        common::ethereum::distribute_eth(
-            wallets.operator,
-            addresses,
-            l1_rpc_url,
-            ecosystem_config.l1_network.chain_id(),
-            AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
-        )
-        .await?;
-        spinner.finish();
-    }
-    Ok(())
-}
-
-pub async fn mint_base_token(
-    ecosystem_config: &EcosystemConfig,
-    chain_config: &ChainConfig,
-    l1_rpc_url: String,
-) -> anyhow::Result<()> {
-    if chain_config.wallet_creation == WalletCreation::Localhost
-        && ecosystem_config.l1_network == L1Network::Localhost
-        && chain_config.base_token != BaseToken::eth()
-    {
-        let spinner = Spinner::new(MSG_MINT_BASE_TOKEN_SPINNER);
-        let wallets = ecosystem_config.get_wallets()?;
-        let chain_wallets = chain_config.get_wallets_config()?;
-        let base_token = &chain_config.base_token;
-        let addresses = vec![wallets.governor.address, chain_wallets.governor.address];
-        let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128
-            / base_token.denominator as u128;
-        common::ethereum::mint_token(
-            wallets.operator,
-            base_token.address,
-            addresses,
-            l1_rpc_url,
-            ecosystem_config.l1_network.chain_id(),
-            amount,
-        )
-        .await?;
-        spinner.finish();
-    }
-    Ok(())
-}
-
 fn apply_port_offset(port_offset: u16, general_config: &mut GeneralConfig) -> anyhow::Result<()> {
     let Some(mut ports_config) = ports_config(general_config) else {
         bail!("Missing ports config");
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
index 61a164c16553..4ddc4bf58569 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs
@@ -1,6 +1,7 @@
+use ::common::forge::ForgeScriptArgs;
+use args::build_transactions::BuildTransactionsArgs;
 pub(crate) use args::create::ChainCreateArgsFinal;
 use clap::Subcommand;
-use common::forge::ForgeScriptArgs;
 pub(crate) use create::create_chain_inner;
 use xshell::Shell;
 
 use crate::commands::chain::{
@@ -10,6 +11,8 @@ use crate::commands::chain::{
 };
 
 pub(crate) mod args;
+mod build_transactions;
+mod common;
 mod create;
 pub mod deploy_l2_contracts;
 pub mod deploy_paymaster;
@@ -22,6 +25,8 @@ mod setup_legacy_bridge;
 pub enum ChainCommands {
     /// Create a new chain, setting the necessary configurations for later initialization
     Create(ChainCreateArgs),
+    /// Create unsigned transactions for chain deployment
+    BuildTransactions(BuildTransactionsArgs),
     /// Initialize chain, deploying necessary contracts and performing on-chain operations
     Init(InitArgs),
    /// Run server genesis
@@ -48,6 +53,7 @@ pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()
     match args {
         ChainCommands::Create(args) => create::run(args, shell),
         ChainCommands::Init(args) => init::run(args, shell).await,
+        ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await,
         ChainCommands::Genesis(args) => genesis::run(args, shell).await,
         ChainCommands::DeployL2Contracts(args) => {
             deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs
new file mode 100644
index 000000000000..697fa518b6e4
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs
@@ -0,0 +1,68 @@
+use std::{path::PathBuf, str::FromStr};
+
+use clap::Parser;
+use common::{forge::ForgeScriptArgs, Prompt};
+use serde::{Deserialize, Serialize};
+use url::Url;
+use zksync_basic_types::H160;
+
+use crate::{
+    consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR,
+    defaults::LOCAL_RPC_URL,
+    messages::{
+        MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT,
+        MSG_SENDER_ADDRESS_PROMPT,
+    },
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser)]
+pub struct BuildTransactionsArgs {
+    /// Address of the transaction sender.
+    #[clap(long)]
+    pub sender: Option<String>,
+    #[clap(long, help = MSG_L1_RPC_URL_HELP)]
+    pub l1_rpc_url: Option<String>,
+    /// Output directory for the generated files.
+    #[arg(long, short)]
+    pub out: Option<PathBuf>,
+    #[clap(flatten)]
+    #[serde(flatten)]
+    pub forge_args: ForgeScriptArgs,
+}
+
+impl BuildTransactionsArgs {
+    pub fn fill_values_with_prompt(self) -> BuildTransactionsFinal {
+        let sender = self.sender.unwrap_or_else(|| {
+            Prompt::new(MSG_SENDER_ADDRESS_PROMPT)
+                .validate_with(|val: &String| -> Result<(), String> {
+                    H160::from_str(val).map_or_else(|err| Err(err.to_string()), |_| Ok(()))
+                })
+                .ask()
+        });
+
+        let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
+            Prompt::new(MSG_L1_RPC_URL_PROMPT)
+                .default(LOCAL_RPC_URL)
+                .validate_with(|val: &String| -> Result<(), String> {
+                    Url::parse(val)
+                        .map(|_| ())
+                        .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string())
+                })
+                .ask()
+        });
+        BuildTransactionsFinal {
+            sender,
+            out: self.out.unwrap_or(DEFAULT_UNSIGNED_TRANSACTIONS_DIR.into()),
+            forge_args: self.forge_args.clone(),
+            l1_rpc_url,
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize)]
+pub struct BuildTransactionsFinal {
+    pub sender: String,
+    pub out: PathBuf,
+    pub forge_args: ForgeScriptArgs,
+    pub l1_rpc_url: String,
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs
index 8a6048a8643b..c25eebda3d6d 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs
@@ -1,3 +1,4 @@
+pub mod build_transactions;
 pub mod change_default;
 pub mod create;
 pub mod init;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs
new file mode 100644
index 000000000000..ff7132360972
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs
@@ -0,0 +1,79 @@
+use anyhow::Context;
+use common::{git, logger, spinner::Spinner};
+use config::{traits::SaveConfigWithBasePath, EcosystemConfig};
+use xshell::Shell;
+
+use super::{
+    args::build_transactions::BuildTransactionsArgs,
+    common::deploy_l1,
+    create_configs::create_initial_deployments_config,
+    utils::{build_system_contracts, install_yarn_dependencies},
+};
+use crate::messages::{
+    MSG_BUILDING_ECOSYSTEM, MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_ECOSYSTEM_TXN_OUTRO,
+    MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR, MSG_INTALLING_DEPS_SPINNER,
+    MSG_WRITING_OUTPUT_FILES_SPINNER,
+};
+
+const DEPLOY_TRANSACTIONS_FILE_SRC: &str =
+    "contracts/l1-contracts/broadcast/DeployL1.s.sol/9/dry-run/run-latest.json";
+const DEPLOY_TRANSACTIONS_FILE_DST: &str = "deploy-l1-txns.json";
+
+const SCRIPT_CONFIG_FILE_SRC: &str = "contracts/l1-contracts/script-config/config-deploy-l1.toml";
+const SCRIPT_CONFIG_FILE_DST: &str = "config-deploy-l1.toml";
+
+pub async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::Result<()> {
+    let args = args.fill_values_with_prompt();
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+    git::submodule_update(shell, ecosystem_config.link_to_code.clone())?;
+
+    let initial_deployment_config = match ecosystem_config.get_initial_deployment_config() {
+        Ok(config) => config,
+        Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?,
+    };
+
+    logger::info(MSG_BUILDING_ECOSYSTEM);
+
+    let spinner = Spinner::new(MSG_INTALLING_DEPS_SPINNER);
+    install_yarn_dependencies(shell, &ecosystem_config.link_to_code)?;
+    build_system_contracts(shell, &ecosystem_config.link_to_code)?;
+    spinner.finish();
+
+    let spinner = Spinner::new(MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER);
+    let contracts_config = deploy_l1(
+        shell,
+        &args.forge_args,
+        &ecosystem_config,
+        &initial_deployment_config,
+        &args.l1_rpc_url,
+        Some(args.sender),
+        false,
+    )
+    .await?;
+
+    contracts_config.save_with_base_path(shell, &args.out)?;
+    spinner.finish();
+
+    let spinner = Spinner::new(MSG_WRITING_OUTPUT_FILES_SPINNER);
+    shell
+        .create_dir(&args.out)
+        .context(MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR)?;
+
+    shell.copy_file(
+        ecosystem_config
+            .link_to_code
+            .join(DEPLOY_TRANSACTIONS_FILE_SRC),
+        args.out.join(DEPLOY_TRANSACTIONS_FILE_DST),
+    )?;
+
+    shell.copy_file(
+        ecosystem_config.link_to_code.join(SCRIPT_CONFIG_FILE_SRC),
+        args.out.join(SCRIPT_CONFIG_FILE_DST),
+    )?;
+    spinner.finish();
+
+    logger::outro(MSG_ECOSYSTEM_TXN_OUTRO);
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs
new file mode 100644
index 000000000000..950d39876b09
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs
@@ -0,0 +1,75 @@
+use anyhow::Context;
+use common::forge::{Forge, ForgeScriptArgs};
+use config::{
+    forge_interface::{
+        deploy_ecosystem::{
+            input::{DeployL1Config, InitialDeploymentConfig},
+            output::DeployL1Output,
+        },
+        script_params::DEPLOY_ECOSYSTEM_SCRIPT_PARAMS,
+    },
+    traits::{ReadConfig, ReadConfigWithBasePath, SaveConfig},
+    ContractsConfig, EcosystemConfig, GenesisConfig,
+};
+use types::{L1Network, ProverMode};
+use xshell::Shell;
+
+use crate::utils::forge::{check_the_balance, fill_forge_private_key};
+
+pub async fn deploy_l1(
+    shell: &Shell,
+    forge_args: &ForgeScriptArgs,
+    config: &EcosystemConfig,
+    initial_deployment_config: &InitialDeploymentConfig,
+    l1_rpc_url: &str,
+    sender: Option<String>,
+    broadcast: bool,
+) -> anyhow::Result<ContractsConfig> {
+    let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code);
+    let default_genesis_config =
+        GenesisConfig::read_with_base_path(shell, config.get_default_configs_path())
+            .context("Context")?;
+
+    let wallets_config = config.get_wallets()?;
+    // For deploying ecosystem we only need genesis batch params
+    let deploy_config = DeployL1Config::new(
+        &default_genesis_config,
+        &wallets_config,
+        initial_deployment_config,
+        config.era_chain_id,
+        config.prover_version == ProverMode::NoProofs,
+    );
+    deploy_config.save(shell, deploy_config_path)?;
+
+    let mut forge = Forge::new(&config.path_to_foundry())
+        .script(&DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.script(), forge_args.clone())
+        .with_ffi()
+        .with_rpc_url(l1_rpc_url.to_string());
+
+    if config.l1_network == L1Network::Localhost {
+        // It's a kludge for reth, just because it doesn't behave properly with large amount of txs
+        forge = forge.with_slow();
+    }
+
+    if let Some(address) = sender {
+        forge = forge.with_sender(address);
+    } else {
+        forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?;
+    }
+
+    if broadcast {
+        forge = forge.with_broadcast();
+        check_the_balance(&forge).await?;
+    }
+
+    forge.run(shell)?;
+
+    let script_output = DeployL1Output::read(
+        shell,
+        DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.output(&config.link_to_code),
+    )?;
+    let mut contracts_config = ContractsConfig::default();
+    contracts_config.update_from_l1_output(&script_output);
+
+    Ok(contracts_config)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
index 7d34437ef2d2..2d31aad10336 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs
@@ -1,11 +1,7 @@
-use std::{
-    path::{Path, PathBuf},
-    str::FromStr,
-};
+use std::{path::PathBuf, str::FromStr};
 
 use anyhow::Context;
 use common::{
-    cmd::Cmd,
     config::global_config,
     forge::{Forge, ForgeScriptArgs},
     git, logger,
@@ -15,25 +11,22 @@ use common::{
 use config::{
     forge_interface::{
         deploy_ecosystem::{
-            input::{
-                DeployErc20Config, DeployL1Config, Erc20DeploymentConfig, InitialDeploymentConfig,
-            },
-            output::{DeployL1Output, ERC20Tokens},
+            input::{DeployErc20Config, Erc20DeploymentConfig, InitialDeploymentConfig},
+            output::ERC20Tokens,
         },
-        script_params::{DEPLOY_ECOSYSTEM_SCRIPT_PARAMS, DEPLOY_ERC20_SCRIPT_PARAMS},
-    },
-    traits::{
-        FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig,
-        SaveConfigWithBasePath,
+        script_params::DEPLOY_ERC20_SCRIPT_PARAMS,
     },
-    ContractsConfig, EcosystemConfig, GenesisConfig,
+    traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, SaveConfigWithBasePath},
+    ContractsConfig, EcosystemConfig,
 };
-use types::{L1Network, ProverMode};
-use xshell::{cmd, Shell};
+use types::L1Network;
+use xshell::Shell;
 
 use super::{
     args::init::{EcosystemArgsFinal, EcosystemInitArgs, EcosystemInitArgsFinal},
+    common::deploy_l1,
     setup_observability,
+    utils::{build_system_contracts, install_yarn_dependencies},
 };
 use crate::{
     accept_ownership::{accept_admin, accept_owner},
@@ -280,47 +273,19 @@ async fn deploy_ecosystem_inner(
     initial_deployment_config: &InitialDeploymentConfig,
     l1_rpc_url: String,
 ) -> anyhow::Result<ContractsConfig> {
-    let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code);
-
-    let default_genesis_config =
-        GenesisConfig::read_with_base_path(shell, config.get_default_configs_path())
-            .context("Context")?;
-
-    let wallets_config = config.get_wallets()?;
-    // For deploying ecosystem we only need genesis batch params
-    let deploy_config = DeployL1Config::new(
-        &default_genesis_config,
-        &wallets_config,
-        initial_deployment_config,
-        config.era_chain_id,
-        config.prover_version == ProverMode::NoProofs,
-    );
-    deploy_config.save(shell, deploy_config_path)?;
-
-    let mut forge = Forge::new(&config.path_to_foundry())
-        .script(&DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.script(), forge_args.clone())
-        .with_ffi()
-        .with_rpc_url(l1_rpc_url.clone())
-        .with_broadcast();
-
-    if config.l1_network == L1Network::Localhost {
-        // It's a kludge for reth, just because it doesn't behave properly with large amount of txs
-        forge = forge.with_slow();
-    }
-
-    forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?;
-
     let spinner = Spinner::new(MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER);
-    check_the_balance(&forge).await?;
-    forge.run(shell)?;
+    let contracts_config = deploy_l1(
+        shell,
+        &forge_args,
+        config,
+        initial_deployment_config,
+        &l1_rpc_url,
+        None,
+        true,
+    )
+    .await?;
     spinner.finish();
 
-    let script_output = DeployL1Output::read(
-        shell,
-        DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.output(&config.link_to_code),
-    )?;
-    let mut contracts_config = ContractsConfig::default();
-    contracts_config.update_from_l1_output(&script_output);
     accept_owner(
         shell,
         config,
@@ -393,13 +358,3 @@ async fn deploy_ecosystem_inner(
 
     Ok(contracts_config)
 }
-
-fn install_yarn_dependencies(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(link_to_code);
-    Ok(Cmd::new(cmd!(shell, "yarn install")).run()?)
-}
-
-fn build_system_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
-    let _dir_guard = shell.push_dir(link_to_code.join("contracts"));
-    Ok(Cmd::new(cmd!(shell, "yarn sc build")).run()?)
-}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
index cb5195ccf937..5fa791b97abf 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs
@@ -1,3 +1,4 @@
+use args::build_transactions::BuildTransactionsArgs;
 use clap::Subcommand;
 use xshell::Shell;
 
@@ -6,11 +7,14 @@ use crate::commands::ecosystem::args::{
 };
 
 mod args;
+pub(crate) mod build_transactions;
 mod change_default;
+mod common;
 mod create;
 pub mod create_configs;
 pub(crate) mod init;
 mod setup_observability;
+mod utils;
 
 #[derive(Subcommand, Debug)]
 #[allow(clippy::large_enum_variant)]
@@ -18,6 +22,8 @@ pub enum EcosystemCommands {
     /// Create a new ecosystem and chain,
     /// setting necessary configurations for later initialization
     Create(EcosystemCreateArgs),
+    /// Create transactions to build ecosystem contracts
+    BuildTransactions(BuildTransactionsArgs),
     /// Initialize ecosystem and chain,
     /// deploying necessary contracts and performing on-chain operations
     Init(EcosystemInitArgs),
@@ -33,6 +39,7 @@ pub(crate) async fn run(shell: &Shell, args: EcosystemCommands) -> anyhow::Result<()> {
     match args {
         EcosystemCommands::Create(args) => create::run(args, shell),
+        EcosystemCommands::BuildTransactions(args) => build_transactions::run(args, shell).await,
         EcosystemCommands::Init(args) => init::run(args, shell).await,
         EcosystemCommands::ChangeDefaultChain(args) => change_default::run(args, shell),
         EcosystemCommands::SetupObservability => setup_observability::run(shell),
diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs
new file mode 100644
index 000000000000..a51adc75fb42
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs
@@ -0,0 +1,14 @@
+use std::path::Path;
+
+use common::cmd::Cmd;
+use xshell::{cmd, Shell};
+
+pub(super) fn install_yarn_dependencies(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
+    let _dir_guard = shell.push_dir(link_to_code);
+    Ok(Cmd::new(cmd!(shell, "yarn install")).run()?)
+}
+
+pub(super) fn build_system_contracts(shell: &Shell, link_to_code: &Path) -> anyhow::Result<()> {
+    let _dir_guard = shell.push_dir(link_to_code.join("contracts"));
+    Ok(Cmd::new(cmd!(shell, "yarn sc build")).run()?)
+}
diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs
index 22e570a5439e..87315dcd8186 100644
--- a/zk_toolbox/crates/zk_inception/src/consts.rs
+++ b/zk_toolbox/crates/zk_inception/src/consts.rs
@@ -8,6 +8,7 @@ pub const PROVER_MIGRATIONS: &str = "prover/crates/lib/prover_dal/migrations";
 pub const PROVER_STORE_MAX_RETRIES: u16 = 10;
 pub const DEFAULT_CREDENTIALS_FILE: &str = "~/.config/gcloud/application_default_credentials.json";
 pub const DEFAULT_PROOF_STORE_DIR: &str = "artifacts";
+pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions";
 pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda";
 pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A";
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index c5b77f63ebae..b20e8edf8ad9 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -75,6 +75,7 @@ pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth...";
 pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str =
     "Minting base token to the governance addresses...";
 pub(super) const MSG_INTALLING_DEPS_SPINNER: &str = "Installing and building dependencies...";
+pub(super) const MSG_PREPARING_CONFIG_SPINNER: &str = "Preparing config files...";
 pub(super) const MSG_DEPLOYING_ERC20_SPINNER: &str = "Deploying ERC20 contracts...";
 pub(super) const MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER: &str =
     "Deploying ecosystem contracts...";
@@ -121,6 +122,14 @@ pub(super) fn msg_chain_load_err(chain_name: &str) -> String {
     format!("Failed to load chain config for {chain_name}")
 }
 
+/// Build ecosystem transactions related messages
+pub(super) const MSG_SENDER_ADDRESS_PROMPT: &str = "What is the address of the transaction sender?";
+pub(super) const MSG_BUILDING_ECOSYSTEM: &str = "Building ecosystem transactions";
+pub(super) const MSG_BUILDING_ECOSYSTEM_CONTRACTS_SPINNER: &str = "Building ecosystem contracts...";
+pub(super) const MSG_WRITING_OUTPUT_FILES_SPINNER: &str = "Writing output files...";
+pub(super) const MSG_ECOSYSTEM_TXN_OUTRO: &str = "Transactions successfully built";
+pub(super) const MSG_ECOSYSTEM_TXN_OUT_PATH_INVALID_ERR: &str = "Invalid path";
+
 /// Chain create related messages
 pub(super) const MSG_PROVER_MODE_HELP: &str = "Prover options";
 pub(super) const MSG_CHAIN_ID_HELP: &str = "Chain ID";
@@ -177,6 +186,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d
 pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database";
 pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database";
 pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database";
+
 /// Chain update related messages
 pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present";
@@ -218,6 +228,14 @@ pub(super) const MSG_DEPLOYING_L2_CONTRACT_SPINNER: &str = "Deploying l2 contrac
 
 /// Chain deploy paymaster related messages
 pub(super) const MSG_DEPLOYING_PAYMASTER: &str = "Deploying paymaster";
 
+/// Chain build related messages
+pub(super) const MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER: &str =
+    "Building chain registration transactions...";
+pub(super) const MSG_CHAIN_TXN_OUT_PATH_INVALID_ERR: &str = "Invalid path";
+pub(super) const MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG: &str =
+    "Missing contract.yaml, please be sure to run this command within initialized ecosystem";
this command within initialized ecosystem"; +pub(super) const MSG_CHAIN_TRANSACTIONS_BUILT: &str = "Chain transactions successfully built"; + /// Run server related messages pub(super) const MSG_SERVER_COMPONENTS_HELP: &str = "Components of server to run"; pub(super) const MSG_ENABLE_CONSENSUS_HELP: &str = "Enable consensus"; diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index d9c5c2196fae..4c450a736393 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -15,6 +15,7 @@ anyhow.workspace = true clap.workspace = true common.workspace = true config.workspace = true +chrono.workspace = true ethers.workspace = true human-panic.workspace = true strum.workspace = true @@ -27,3 +28,4 @@ clap-markdown.workspace = true futures.workspace = true types.workspace = true serde_yaml.workspace = true +zksync_basic_types.workspace = true diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs index 875f2982c959..1f3893e293ef 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs @@ -5,5 +5,6 @@ pub mod fmt; pub mod lint; pub(crate) mod lint_utils; pub mod prover; +pub mod send_transactions; pub mod snapshot; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs new file mode 100644 index 000000000000..e3d4f220ff28 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs @@ -0,0 +1,69 @@ +use std::path::PathBuf; + +use clap::Parser; +use common::Prompt; +use url::Url; + +use crate::{ + defaults::LOCAL_RPC_URL, + messages::{ + MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, + MSG_PROMPT_TRANSACTION_FILE, + }, +}; + +const DEFAULT_TRANSACTION_CONFIRMATIONS: usize = 2; + +#[derive(Debug, Parser)] +pub struct SendTransactionsArgs { + #[clap(long)] + pub file: Option, + #[clap(long)] + pub private_key: Option, + #[clap(long)] + pub l1_rpc_url: Option, + #[clap(long)] + pub confirmations: Option, +} + +#[derive(Debug)] +pub struct SendTransactionsArgsFinal { + pub file: PathBuf, + pub private_key: String, + pub l1_rpc_url: String, + pub confirmations: usize, +} + +impl SendTransactionsArgs { + pub fn fill_values_with_prompt(self) -> SendTransactionsArgsFinal { + let file = self + .file + .unwrap_or_else(|| Prompt::new(MSG_PROMPT_TRANSACTION_FILE).ask()); + + let private_key = self + .private_key + .unwrap_or_else(|| Prompt::new(MSG_PROMPT_SECRET_KEY).ask()); + + let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { + Prompt::new(MSG_PROMPT_L1_RPC_URL) + .default(LOCAL_RPC_URL) + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_INVALID_L1_RPC_URL_ERR.to_string()) + }) + .ask() + }); + + let confirmations = self + .confirmations + .unwrap_or(DEFAULT_TRANSACTION_CONFIRMATIONS); + + SendTransactionsArgsFinal { + file, + private_key, + l1_rpc_url, + confirmations, + } + } +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs new file mode 100644 index 000000000000..79d8efc600e8 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs @@ -0,0 +1,132 @@ +use std::{ + fs::{File, OpenOptions}, + io::{Read, 
Write}, + ops::Add, + path::PathBuf, + time::Duration, +}; + +use anyhow::Context; +use args::SendTransactionsArgs; +use chrono::Local; +use common::{ethereum::create_ethers_client, logger}; +use config::EcosystemConfig; +use ethers::{abi::Bytes, providers::Middleware, types::TransactionRequest, utils::hex}; +use serde::Deserialize; +use tokio::time::sleep; +use xshell::Shell; +use zksync_basic_types::{H160, U256}; + +use crate::{ + consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, + messages::{ + msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, + MSG_UNABLE_TO_READ_FILE_ERR, MSG_UNABLE_TO_READ_PARSE_JSON_ERR, + MSG_UNABLE_TO_WRITE_FILE_ERR, + }, +}; + +pub mod args; + +const MAX_ATTEMPTS: u32 = 3; + +#[derive(Deserialize)] +struct Transaction { + from: String, + gas: String, + input: String, +} + +#[derive(Deserialize)] +struct Txn { + #[serde(rename = "contractAddress")] + contract_address: String, + transaction: Transaction, +} + +#[derive(Deserialize)] +struct Txns { + transactions: Vec<Txn>, +} + +pub async fn run(shell: &Shell, args: SendTransactionsArgs) -> anyhow::Result<()> { + let args = args.fill_values_with_prompt(); + + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_id = ecosystem_config.l1_network.chain_id(); + + // Read the JSON file + let mut file = File::open(args.file).context(MSG_UNABLE_TO_OPEN_FILE_ERR)?; + let mut data = String::new(); + file.read_to_string(&mut data) + .context(MSG_UNABLE_TO_READ_FILE_ERR)?; + + // Parse the JSON file + let txns: Txns = serde_json::from_str(&data).context(MSG_UNABLE_TO_READ_PARSE_JSON_ERR)?; + + let timestamp = Local::now().format("%Y%m%d_%H%M%S").to_string(); + let log_file = ecosystem_config + .link_to_code + .join(DEFAULT_UNSIGNED_TRANSACTIONS_DIR) + .join(format!("{}_receipt.log", timestamp)); + + let client = create_ethers_client(args.private_key.parse()?, args.l1_rpc_url, Some(chain_id))?; + let mut nonce = client.get_transaction_count(client.address(), None).await?; + let gas_price = client.get_gas_price().await?; + + for txn in txns.transactions { + let to: H160 = txn.contract_address.parse()?; + let from: H160 = txn.transaction.from.parse()?; + let gas_limit: U256 = txn.transaction.gas.parse()?; + let input_data: Bytes = hex::decode(txn.transaction.input)?; + + let tx = TransactionRequest::new() + .to(to) + .from(from) + .gas(gas_limit) + .gas_price(gas_price) + .nonce(nonce) + .data(input_data) + .chain_id(chain_id); + + nonce = nonce.add(1); + + let mut attempts = 0; + let receipt = loop { + attempts += 1; + match client + .send_transaction(tx.clone(), None) + .await?
+ .confirmations(args.confirmations) + .interval(Duration::from_millis(30)) + .await + { + Ok(receipt) => break receipt, + Err(e) if attempts < MAX_ATTEMPTS => { + logger::info(format!("Attempt {} failed: {:?}", attempts, e).as_str()); + sleep(Duration::from_secs(1)).await; + continue; + } + Err(e) => return Err(e).context(MSG_FAILED_TO_SEND_TXN_ERR)?, + } + }; + + log_receipt(&log_file, format!("{:?}", receipt).as_str())?; + } + + logger::outro(msg_send_txns_outro(log_file.to_string_lossy().as_ref())); + + Ok(()) +} + +fn log_receipt(path: &PathBuf, receipt: &str) -> anyhow::Result<()> { + let mut file = OpenOptions::new() + .append(true) + .create(true) + .open(path) + .context(MSG_UNABLE_TO_OPEN_FILE_ERR)?; + + writeln!(file, "{}", receipt).context(MSG_UNABLE_TO_WRITE_FILE_ERR)?; + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zk_toolbox/crates/zk_supervisor/src/consts.rs new file mode 100644 index 000000000000..66f00c7553b5 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/consts.rs @@ -0,0 +1 @@ +pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zk_toolbox/crates/zk_supervisor/src/defaults.rs index f4bae739c2d1..d9325402f533 100644 --- a/zk_toolbox/crates/zk_supervisor/src/defaults.rs +++ b/zk_toolbox/crates/zk_supervisor/src/defaults.rs @@ -2,3 +2,4 @@ pub const TEST_DATABASE_SERVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5433/zksync_local_test"; pub const TEST_DATABASE_PROVER_URL: &str = "postgres://postgres:notsecurepassword@localhost:5433/prover_local_test"; +pub const LOCAL_RPC_URL: &str = "http://127.0.0.1:8545"; diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs index 67a5179f4ecf..3c34d0596569 100644 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ b/zk_toolbox/crates/zk_supervisor/src/main.rs @@ -1,7 +1,7 @@ use clap::{Parser, Subcommand}; use commands::{ contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, prover::ProverCommands, - snapshot::SnapshotCommands, test::TestCommands, + send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, }; use common::{ check_general_prerequisites, @@ -13,14 +13,16 @@ use common::{ use config::EcosystemConfig; use messages::{ msg_global_chain_does_not_exist, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, - MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, - MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, + MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, + MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, + MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; mod commands; +mod consts; mod dals; mod defaults; mod messages; @@ -57,6 +59,8 @@ enum SupervisorSubcommands { Prover(ProverCommands), #[command(about = MSG_CONTRACTS_ABOUT)] Contracts(ContractsArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), } #[derive(Parser, Debug)] @@ -117,6 +121,9 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, SupervisorSubcommands::Contracts(args) => 
commands::contracts::run(shell, args)?, + SupervisorSubcommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? + } } Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 311a6e11c326..72887e40a2ba 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -202,3 +202,19 @@ pub(super) const MSG_DESERIALIZE_TEST_WALLETS_ERR: &str = "Impossible to deseria pub(super) const MSG_WALLETS_TEST_SUCCESS: &str = "Wallets test success"; pub(super) const MSG_LOADTEST_ABOUT: &str = "Run loadtest"; + +// Send transactions related messages +pub(super) const MSG_SEND_TXNS_ABOUT: &str = "Send transactions from file"; +pub(super) const MSG_PROMPT_TRANSACTION_FILE: &str = "Path to transactions file"; +pub(super) const MSG_PROMPT_SECRET_KEY: &str = "Secret key of the sender"; +pub(super) const MSG_PROMPT_L1_RPC_URL: &str = "L1 RPC URL"; +pub(super) fn msg_send_txns_outro(log_file: &str) -> String { + format!("Transaction receipts logged to: {}", log_file) } + +pub(super) const MSG_UNABLE_TO_OPEN_FILE_ERR: &str = "Unable to open file"; +pub(super) const MSG_UNABLE_TO_READ_FILE_ERR: &str = "Unable to read file"; +pub(super) const MSG_UNABLE_TO_WRITE_FILE_ERR: &str = "Unable to write data to file"; +pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON"; +pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; +pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; From 6d4090f025286ab62b7ab5f5e2eaaeac34b0aec6 Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 16 Sep 2024 16:46:43 +0200 Subject: [PATCH 081/116] feat: deployment of multicall3 on L2 by zk_toolbox (#2872) For complex atomic operations on the consensus registry. Should be merged after https://github.com/matter-labs/era-contracts/pull/805 Tested manually by running `cargo run -p zk_inception -- e init --dev`, which populated the multicall3 address in contracts.yaml with a non-zero value.
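The wiring follows the existing pattern for forge-script outputs: the deploy script writes a small output file, the toolbox deserializes it into a typed struct (the new `Multicall3Output` below), and the address is copied into the chain's `contracts.yaml`. A self-contained sketch of that flow, with simplified types (`String` instead of ethers' `Address`), an illustrative address, and the `serde`/`serde_yaml` crates assumed:

```
use serde::Deserialize;

// Mirrors the `Multicall3Output` struct added in this patch; the real code
// uses ethers' `Address` instead of `String`.
#[derive(Debug, Deserialize)]
struct Multicall3Output {
    multicall3: String,
}

// Simplified stand-in for the `L2Contracts` section of contracts.yaml.
#[derive(Debug, Default)]
struct L2Contracts {
    multicall3: Option<String>,
}

fn set_multicall3(contracts: &mut L2Contracts, out: &Multicall3Output) {
    contracts.multicall3 = Some(out.multicall3.clone());
}

fn main() -> Result<(), serde_yaml::Error> {
    // Illustrative script output; the deployed address is chain-specific.
    let raw = "multicall3: '0x0000000000000000000000000000000000000001'";
    let out: Multicall3Output = serde_yaml::from_str(raw)?;
    let mut contracts = L2Contracts::default();
    set_multicall3(&mut contracts, &out);
    assert!(contracts.multicall3.is_some());
    Ok(())
}
```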
--- contracts | 2 +- zk_toolbox/crates/config/src/contracts.rs | 7 +++++++ .../src/forge_interface/deploy_l2_contracts/output.rs | 6 ++++++ .../zk_inception/src/commands/chain/deploy_l2_contracts.rs | 6 +++++- 4 files changed, 19 insertions(+), 2 deletions(-) diff --git a/contracts b/contracts index 3a1b5d4b94ff..bce4b2d0f34b 160000 --- a/contracts +++ b/contracts @@ -1 +1 @@ -Subproject commit 3a1b5d4b94ffb00f03d436a7db7e48589eb74d39 +Subproject commit bce4b2d0f34bd87f1aaadd291772935afb1c3bd6 diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zk_toolbox/crates/config/src/contracts.rs index 0d4b1c7b1f81..8296aa188527 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zk_toolbox/crates/config/src/contracts.rs @@ -7,6 +7,7 @@ use crate::{ deploy_ecosystem::output::DeployL1Output, deploy_l2_contracts::output::{ ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, + Multicall3Output, }, register_chain::output::RegisterChainOutput, }, @@ -102,6 +103,11 @@ impl ContractsConfig { self.l2.default_l2_upgrader = default_upgrade_output.l2_default_upgrader; Ok(()) } + + pub fn set_multicall3(&mut self, multicall3_output: &Multicall3Output) -> anyhow::Result<()> { + self.l2.multicall3 = Some(multicall3_output.multicall3); + Ok(()) + } } impl FileConfigWithDefaultName for ContractsConfig { @@ -152,4 +158,5 @@ pub struct L2Contracts { pub testnet_paymaster_addr: Address, pub default_l2_upgrader: Address, pub consensus_registry: Option
<Address>, + pub multicall3: Option<Address>
, } diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 860e7e293f99..ca5cac12c02d 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -6,6 +6,7 @@ use crate::traits::ZkToolboxConfig; impl ZkToolboxConfig for InitializeBridgeOutput {} impl ZkToolboxConfig for DefaultL2UpgradeOutput {} impl ZkToolboxConfig for ConsensusRegistryOutput {} +impl ZkToolboxConfig for Multicall3Output {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { @@ -23,3 +24,8 @@ pub struct ConsensusRegistryOutput { pub consensus_registry_implementation: Address, pub consensus_registry_proxy: Address, } + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Multicall3Output { + pub multicall3: Address, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs index 7545ec2ec26f..5bfc0a623488 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs @@ -11,7 +11,10 @@ use config::{ forge_interface::{ deploy_l2_contracts::{ input::DeployL2ContractsInput, - output::{ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput}, + output::{ + ConsensusRegistryOutput, DefaultL2UpgradeOutput, InitializeBridgeOutput, + Multicall3Output, + }, }, script_params::DEPLOY_L2_CONTRACTS_SCRIPT_PARAMS, }, @@ -205,6 +208,7 @@ pub async fn deploy_l2_contracts( contracts_config.set_l2_shared_bridge(&InitializeBridgeOutput::read(shell, out)?)?; contracts_config.set_default_l2_upgrade(&DefaultL2UpgradeOutput::read(shell, out)?)?; contracts_config.set_consensus_registry(&ConsensusRegistryOutput::read(shell, out)?)?; + contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?)?; Ok(()) }, ) From 413856fa5fc00d14d77afc1e6a21e3257885c170 Mon Sep 17 00:00:00 2001 From: Danil Date: Mon, 16 Sep 2024 17:33:18 +0200 Subject: [PATCH 082/116] fix(zk_toolbox): Clone era observability (#2892) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- etc/lint-config/ignore.yaml | 3 ++- etc/utils/src/index.ts | 4 +++- .../crates/zk_inception/src/commands/containers.rs | 14 +++++++++++--- .../zk_inception/src/commands/ecosystem/mod.rs | 2 +- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index 108192b18438..3d0c4869df84 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -22,5 +22,6 @@ dirs: [ "system-contracts", "artifacts-zk", "cache-zk", - "contracts/" + "contracts/", + "era-observability" ] diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index 28cd864a1bf6..e64439c53fcb 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -25,7 +25,8 @@ const IGNORED_DIRS = [ 'artifacts-zk', 'cache-zk', // Ignore directories with OZ and forge submodules. 
- 'contracts/l1-contracts/lib' + 'contracts/l1-contracts/lib', + 'era-observability' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; @@ -33,6 +34,7 @@ const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc. // spawns a new shell and can execute arbitrary commands, like "ls -la | grep .env" // returns { stdout, stderr } const promisified = promisify(_exec); + export function exec(command: string) { command = command.replace(/\n/g, ' '); return promisified(command); diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zk_toolbox/crates/zk_inception/src/commands/containers.rs index 81d7970df839..9c11cc2e3efc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/containers.rs @@ -6,9 +6,13 @@ use config::{EcosystemConfig, DOCKER_COMPOSE_FILE, ERA_OBSERVABILITY_COMPOSE_FIL use xshell::Shell; use super::args::ContainersArgs; -use crate::messages::{ - MSG_CONTAINERS_STARTED, MSG_FAILED_TO_FIND_ECOSYSTEM_ERR, MSG_RETRY_START_CONTAINERS_PROMPT, - MSG_STARTING_CONTAINERS, MSG_STARTING_DOCKER_CONTAINERS_SPINNER, +use crate::{ + commands::ecosystem::setup_observability, + messages::{ + MSG_CONTAINERS_STARTED, MSG_FAILED_TO_FIND_ECOSYSTEM_ERR, + MSG_RETRY_START_CONTAINERS_PROMPT, MSG_STARTING_CONTAINERS, + MSG_STARTING_DOCKER_CONTAINERS_SPINNER, + }, }; pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { @@ -20,6 +24,10 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { logger::info(MSG_STARTING_CONTAINERS); let spinner = Spinner::new(MSG_STARTING_DOCKER_CONTAINERS_SPINNER); + if args.observability { + setup_observability::run(shell)?; + } + start_containers(shell, args.observability)?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs index 5fa791b97abf..3f4aa7565e19 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs @@ -13,7 +13,7 @@ mod common; mod create; pub mod create_configs; pub(crate) mod init; -mod setup_observability; +pub(crate) mod setup_observability; mod utils; #[derive(Subcommand, Debug)] From beca173580aaedf4324047a8b0643be54799450c Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Mon, 16 Sep 2024 18:17:32 +0200 Subject: [PATCH 083/116] feat: updated the docker examples to generate consensus keys and use gossipnet (#2476) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tested on a hetzner instance. 
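The secrets-generation step added in this patch (`generate_secrets.sh`, invoked by a one-shot `generate-secrets` compose service in the diff below) is deliberately idempotent: keys are only generated when the secrets file is missing or empty, so restarting the stack never overwrites an existing node identity. A rough Rust equivalent of that guard, for illustration only; the helper name is hypothetical, while the binary path and subcommand come from the compose files:

```
use std::{fs, path::Path, process::Command};

// Hypothetical helper sketching what generate_secrets.sh does: skip
// generation when the secrets file already has content.
fn ensure_consensus_secrets(path: &Path) -> std::io::Result<()> {
    let already_present = fs::metadata(path).map(|m| m.len() > 0).unwrap_or(false);
    if already_present {
        return Ok(()); // keep the existing node identity
    }
    let output = Command::new("/usr/bin/zksync_external_node")
        .arg("generate-secrets")
        .output()?;
    // The generator prints the secrets to stdout; persist them.
    fs::write(path, output.stdout)
}

fn main() -> std::io::Result<()> {
    ensure_consensus_secrets(Path::new("configs/mainnet_consensus_secrets.yaml"))
}
```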
--------- Co-authored-by: Bruno França --- .../external-node/09_decentralization.md | 82 +------------------ .../configs/generate_secrets.sh | 5 ++ .../configs/mainnet_consensus_config.yaml | 10 +++ .../configs/testnet_consensus_config.yaml | 10 +++ .../mainnet-external-node-docker-compose.yml | 43 ++++++++-- .../testnet-external-node-docker-compose.yml | 43 ++++++++-- 6 files changed, 95 insertions(+), 98 deletions(-) create mode 100755 docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh create mode 100644 docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml create mode 100644 docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml diff --git a/docs/guides/external-node/09_decentralization.md b/docs/guides/external-node/09_decentralization.md index 41f59486bef6..d97ec5b30cf9 100644 --- a/docs/guides/external-node/09_decentralization.md +++ b/docs/guides/external-node/09_decentralization.md @@ -7,85 +7,7 @@ will eventually be used instead of ZKsync API for synchronizing data. On the gossipnet, the data integrity will be protected by the BFT (byzantine fault-tolerant) consensus algorithm (currently data is signed just by the main node though). -## Enabling gossipnet on your node - -> [!NOTE] -> -> Because the data transmitted over the gossipnet is signed by the main node (and eventually by the consensus quorum), -> the signatures need to be backfilled to the node's local storage the first time you switch from centralized (ZKsync -> API based) synchronization to the decentralized (gossipnet based) synchronization (this is a one-time thing). With the -> current implementation it may take a couple of hours and gets faster the more nodes you add to the -> `gossip_static_outbound` list (see below). We are working to remove this inconvenience. - -> [!NOTE] -> -> The minimal supported server version for this is -> [24.11.0](https://github.com/matter-labs/zksync-era/releases/tag/core-v24.11.0) - -### Generating secrets - -Each participant node of the gossipnet has to have an identity (a public/secret key pair). When running your node for -the first time, generate the secrets by running: - -``` -docker run --entrypoint /usr/bin/zksync_external_node "matterlabs/external-node:2.0-v24.12.0" generate-secrets > consensus_secrets.yaml -chmod 600 consensus_secrets.yaml -``` - -> [!NOTE] -> -> NEVER reveal the secret keys used by your node. Otherwise, someone can impersonate your node on the gossipnet. If you -> suspect that your secret key has been leaked, you can generate fresh keys using the same tool. -> -> If you want someone else to connect to your node, give them your PUBLIC key instead. Both public and secret keys are -> present in the `consensus_secrets.yaml` (public keys are in comments). - -### Preparing configuration file - -Copy the template of the consensus configuration file (for -[mainnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/mainnet_consensus_config.yaml) -or -[testnet](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/prepared_configs/testnet_consensus_config.yaml) -). - -> [!NOTE] -> -> You need to fill in the `public_addr` field. This is the address that will (not implemented yet) be advertised over -> gossipnet to other nodes, so that they can establish connections to your node. If you don't want to expose your node -> to the public internet, you can use IP in your local network. 
- -Currently the config contains the following fields (refer to config -[schema](https://github.com/matter-labs/zksync-era/blob/990676c5f84afd2ff8cd337f495c82e8d1f305a4/core/lib/protobuf_config/src/proto/core/consensus.proto#L66) -for more details): - -- `server_addr` - local TCP socket address that the node should listen on for incoming connections. Note that this is an - additional TCP port that will be opened by the node. -- `public_addr` - the public address of your node that will be advertised over the gossipnet. -- `max_payload_size` - limit (in bytes) on the sized of the ZKsync ERA block received from the gossipnet. This protects - your node from getting DoS`ed by too large network messages. Use the value from the template. -- `gossip_dynamic_inbound_limit` - maximal number of unauthenticated concurrent inbound connections that can be - established to your node. This is a DDoS protection measure. -- `gossip_static_outbound` - list of trusted peers that your node should always try to connect to. The template contains - the nodes maintained by Matterlabs, but you can add more if you know any. Note that the list contains both the network - address AND the public key of the node - this prevents spoofing attacks. - -### Setting environment variables - -Uncomment (or add) the following lines in your `.env` config: - -``` -EN_CONSENSUS_CONFIG_PATH=... -EN_CONSENSUS_SECRETS_PATH=... -``` - -These variables should point to your consensus config and secrets files that we have just created. Tweak the paths to -the files if you have placed them differently. - ### Add `--enable-consensus` flag to your entry point command -For the consensus configuration to take effect you have to add `--enable-consensus` flag to the command line when -running the node, for example: - -``` -docker run "matterlabs/external-node:2.0-v24.12.0" --enable-consensus -``` +For the consensus configuration to take effect you have to add `--enable-consensus` flag when +running the node. You can do that by editing the docker compose files (mainnet-external-node-docker-compose.yml or testnet-external-node-docker-compose.yml) and uncommenting the line with `--enable-consensus`. diff --git a/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh b/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh new file mode 100755 index 000000000000..e4d8ceed67b6 --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/generate_secrets.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ ! 
-s $1 ]; then + /usr/bin/zksync_external_node generate-secrets > $1 +fi diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml new file mode 100644 index 000000000000..01c9d323a931 --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:68d29127ab03408bf5c838553b19c32bdb3aaaae9bf293e5e078c3a0d265822a' + addr: 'external-node-consensus-mainnet.zksync.dev:3054' + - key: 'node:public:ed25519:b521e1bb173d04bc83d46b859d1296378e94a40427a6beb9e7fdd17cbd934c11' + addr: 'external-node-moby-consensus-mainnet.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml new file mode 100644 index 000000000000..cfcc6b9d43e5 --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -0,0 +1,10 @@ +server_addr: '0.0.0.0:3054' +public_addr: '127.0.0.1:3054' +max_payload_size: 5000000 +gossip_dynamic_inbound_limit: 100 +gossip_static_outbound: + # preconfigured ENs owned by Matterlabs that you can connect to + - key: 'node:public:ed25519:4a94067664e7b8d0927ab1443491dab71a1d0c63f861099e1852f2b6d0831c3e' + addr: 'external-node-consensus-sepolia.zksync.dev:3054' + - key: 'node:public:ed25519:cfbbebc74127099680584f07a051a2573e2dd7463abdd000d31aaa44a7985045' + addr: 'external-node-moby-consensus-sepolia.zksync.dev:3054' diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 369ce50be0b2..64bef02b17a1 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -1,16 +1,16 @@ -version: "3.2" +name: "mainnet-node" services: prometheus: image: prom/prometheus:v2.35.0 volumes: - - mainnet-prometheus-data:/prometheus + - prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml expose: - 9090 grafana: image: grafana/grafana:9.3.6 volumes: - - mainnet-grafana-data:/var/lib/grafana + - grafana-data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning environment: GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin" @@ -37,7 +37,7 @@ services: expose: - 5430 volumes: - - mainnet-postgres:/var/lib/postgresql/data + - postgres:/var/lib/postgresql/data healthcheck: interval: 1s timeout: 3s @@ -49,17 +49,39 @@ services: environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 + # Generation of consensus secrets. + # The secrets are generated iff the secrets file doesn't already exist. 
+ generate-secrets: + image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/configs/generate_secrets.sh", + "/configs/mainnet_consensus_secrets.yaml", + ] + volumes: + - ./configs:/configs external-node: image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/usr/bin/entrypoint.sh", + # Uncomment the following line to enable consensus + # "--enable-consensus", + ] + restart: always depends_on: postgres: condition: service_healthy + generate-secrets: + condition: service_completed_successfully ports: + - "0.0.0.0:3054:3054" # consensus public port - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" volumes: - - mainnet-rocksdb:/db + - rocksdb:/db + - ./configs:/configs expose: - 3322 environment: @@ -83,8 +105,11 @@ services: EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly" RUST_LOG: "warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=debug,zksync_utils=debug,zksync_web3_decl::client=error" + EN_CONSENSUS_CONFIG_PATH: "/configs/mainnet_consensus_config.yaml" + EN_CONSENSUS_SECRETS_PATH: "/configs/mainnet_consensus_secrets.yaml" + volumes: - mainnet-postgres: {} - mainnet-rocksdb: {} - mainnet-prometheus-data: {} - mainnet-grafana-data: {} + postgres: {} + rocksdb: {} + prometheus-data: {} + grafana-data: {} diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index 1417c6cc360f..f865f500c5b3 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -1,16 +1,16 @@ -version: "3.2" +name: "testnet-node" services: prometheus: image: prom/prometheus:v2.35.0 volumes: - - testnet-prometheus-data:/prometheus + - prometheus-data:/prometheus - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml expose: - 9090 grafana: image: grafana/grafana:9.3.6 volumes: - - testnet-grafana-data:/var/lib/grafana + - grafana-data:/var/lib/grafana - ./grafana/provisioning:/etc/grafana/provisioning environment: GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin" @@ -37,7 +37,7 @@ services: expose: - 5430 volumes: - - testnet-postgres:/var/lib/postgresql/data + - postgres:/var/lib/postgresql/data healthcheck: interval: 1s timeout: 3s @@ -49,17 +49,39 @@ services: environment: - POSTGRES_PASSWORD=notsecurepassword - PGPORT=5430 + # Generation of consensus secrets. + # The secrets are generated iff the secrets file doesn't already exist. 
+ generate-secrets: + image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/configs/generate_secrets.sh", + "/configs/testnet_consensus_secrets.yaml", + ] + volumes: + - ./configs:/configs external-node: image: "matterlabs/external-node:2.0-v24.16.0" + entrypoint: + [ + "/usr/bin/entrypoint.sh", + # Uncomment the following line to enable consensus + # "--enable-consensus", + ] + restart: always depends_on: postgres: condition: service_healthy + generate-secrets: + condition: service_completed_successfully ports: + - "0.0.0.0:3054:3054" # consensus public port - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" volumes: - - testnet-rocksdb:/db + - rocksdb:/db + - ./configs:/configs expose: - 3322 environment: @@ -83,8 +105,11 @@ services: EN_SNAPSHOTS_OBJECT_STORE_MODE: "GCSAnonymousReadOnly" RUST_LOG: "warn,zksync=info,zksync_core::metadata_calculator=debug,zksync_state=debug,zksync_utils=debug,zksync_web3_decl::client=error" + EN_CONSENSUS_CONFIG_PATH: "/configs/testnet_consensus_config.yaml" + EN_CONSENSUS_SECRETS_PATH: "/configs/testnet_consensus_secrets.yaml" + volumes: - testnet-postgres: {} - testnet-rocksdb: {} - testnet-prometheus-data: {} - testnet-grafana-data: {} + postgres: {} + rocksdb: {} + prometheus-data: {} + grafana-data: {} From f848d93fe4201905bbe9bd24ffbacf954f7ddd32 Mon Sep 17 00:00:00 2001 From: Ivan Schasny <31857042+ischasny@users.noreply.github.com> Date: Mon, 16 Sep 2024 18:58:05 +0100 Subject: [PATCH 084/116] fix: token multiplier setter is taken from chain config (#2893) Fix `zki chain update-token-multiplier-setter` to take the address from the chain config instead of the ecosystem. --- docs/guides/external-node/09_decentralization.md | 5 +++-- .../src/commands/chain/set_token_multiplier_setter.rs | 4 ++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docs/guides/external-node/09_decentralization.md b/docs/guides/external-node/09_decentralization.md index d97ec5b30cf9..caf93a85a923 100644 --- a/docs/guides/external-node/09_decentralization.md +++ b/docs/guides/external-node/09_decentralization.md @@ -9,5 +9,6 @@ On the gossipnet, the data integrity will be protected by the BFT (byzantine fau ### Add `--enable-consensus` flag to your entry point command -For the consensus configuration to take effect you have to add `--enable-consensus` flag when -running the node. You can do that by editing the docker compose files (mainnet-external-node-docker-compose.yml or testnet-external-node-docker-compose.yml) and uncommenting the line with `--enable-consensus`. +For the consensus configuration to take effect you have to add `--enable-consensus` flag when running the node. You can +do that by editing the docker compose files (mainnet-external-node-docker-compose.yml or +testnet-external-node-docker-compose.yml) and uncommenting the line with `--enable-consensus`.
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index f92391c22f47..15f7de4c277c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -43,8 +43,8 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .l1_rpc_url .expose_str() .to_string(); - let token_multiplier_setter_address = ecosystem_config - .get_wallets() + let token_multiplier_setter_address = chain_config + .get_wallets_config() .context(MSG_WALLETS_CONFIG_MUST_BE_PRESENT)? .token_multiplier_setter .context(MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND)? From 2ac7cc5836e69fc82c98df2005fedee01c1084e1 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Tue, 17 Sep 2024 11:24:06 +0300 Subject: [PATCH 085/116] feat: emit errors in prover API metrics (#2890) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Report reasons of failed calls in prover API metrics. ## Why ❔ To have more observability ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- core/node/external_proof_integration_api/src/lib.rs | 11 ++--------- .../external_proof_integration_api/src/metrics.rs | 11 ++--------- .../external_proof_integration_api/src/middleware.rs | 8 +++++--- 3 files changed, 9 insertions(+), 21 deletions(-) diff --git a/core/node/external_proof_integration_api/src/lib.rs b/core/node/external_proof_integration_api/src/lib.rs index 4355896e2a2e..d152ea265cb8 100644 --- a/core/node/external_proof_integration_api/src/lib.rs +++ b/core/node/external_proof_integration_api/src/lib.rs @@ -19,10 +19,7 @@ use types::{ExternalProof, ProofGenerationDataResponse}; use zksync_basic_types::L1BatchNumber; pub use crate::processor::Processor; -use crate::{ - metrics::{CallOutcome, Method}, - middleware::MetricsMiddleware, -}; +use crate::{metrics::Method, middleware::MetricsMiddleware}; /// External API implementation. 
#[derive(Debug)] @@ -37,11 +34,7 @@ impl Api { axum::middleware::from_fn(move |req: Request, next: Next| async move { let middleware = MetricsMiddleware::new(method); let response = next.run(req).await; - let outcome = match response.status().is_success() { - true => CallOutcome::Success, - false => CallOutcome::Failure, - }; - middleware.observe(outcome); + middleware.observe(response.status()); response }) }; diff --git a/core/node/external_proof_integration_api/src/metrics.rs b/core/node/external_proof_integration_api/src/metrics.rs index f43b49b7b1c0..6b909a278d62 100644 --- a/core/node/external_proof_integration_api/src/metrics.rs +++ b/core/node/external_proof_integration_api/src/metrics.rs @@ -2,13 +2,6 @@ use std::time::Duration; use vise::{EncodeLabelSet, EncodeLabelValue, Histogram, LabeledFamily, Metrics}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] -#[metrics(label = "outcome", rename_all = "snake_case")] -pub(crate) enum CallOutcome { - Success, - Failure, -} - #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelSet, EncodeLabelValue)] #[metrics(label = "type", rename_all = "snake_case")] pub(crate) enum Method { @@ -20,8 +13,8 @@ pub(crate) enum Method { #[derive(Debug, Metrics)] #[metrics(prefix = "external_proof_integration_api")] pub(crate) struct ProofIntegrationApiMetrics { - #[metrics(labels = ["method", "outcome"], buckets = vise::Buckets::LATENCIES)] - pub call_latency: LabeledFamily<(Method, CallOutcome), Histogram, 2>, + #[metrics(labels = ["method", "status"], buckets = vise::Buckets::LATENCIES)] + pub call_latency: LabeledFamily<(Method, u16), Histogram, 2>, } #[vise::register] diff --git a/core/node/external_proof_integration_api/src/middleware.rs b/core/node/external_proof_integration_api/src/middleware.rs index 1dc6aefe9171..ebd4ef5bfb09 100644 --- a/core/node/external_proof_integration_api/src/middleware.rs +++ b/core/node/external_proof_integration_api/src/middleware.rs @@ -1,6 +1,7 @@ +use axum::http::StatusCode; use tokio::time::Instant; -use crate::metrics::{CallOutcome, Method, METRICS}; +use crate::metrics::{Method, METRICS}; #[derive(Debug)] pub(crate) struct MetricsMiddleware { @@ -16,7 +17,8 @@ impl MetricsMiddleware { } } - pub fn observe(&self, outcome: CallOutcome) { - METRICS.call_latency[&(self.method, outcome)].observe(self.started_at.elapsed()); + pub fn observe(&self, status_code: StatusCode) { + METRICS.call_latency[&(self.method, status_code.as_u16())] + .observe(self.started_at.elapsed()); } } From 1baa58507ba5d9c2cab6df3e2296220da2d8eb69 Mon Sep 17 00:00:00 2001 From: Danil Date: Tue, 17 Sep 2024 11:13:20 +0200 Subject: [PATCH 086/116] ci: Deprecate zk in ci tests (#2860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Do not use the old zk tool inside our CI. Only the zk docker setup is still in place; it will be replaced soon by the DevOps team. ## Why ❔ We are deprecating the old zk tool, and it is not needed anymore. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`.
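For context, the replacement commands used in the updated workflows (`zk_supervisor lint`, `zk_supervisor contracts`, `zk_supervisor test ...`) follow the same thin-wrapper pattern as the rest of zk_toolbox: each subcommand shells out through `xshell`. A minimal sketch of that pattern, assuming the `xshell` and `anyhow` crates; the cargo invocations are illustrative, not the toolbox's exact internals:

```
use xshell::{cmd, Shell};

// Roughly what a `zk_supervisor lint -t rs --check`-style subcommand does:
// run the underlying cargo tools through a shared Shell handle.
fn run_rust_lint(shell: &Shell, check: bool) -> anyhow::Result<()> {
    let check_flag: &[&str] = if check { &["--check"] } else { &[] };
    cmd!(shell, "cargo fmt --all -- {check_flag...}").run()?;
    cmd!(shell, "cargo clippy -- -D warnings").run()?;
    Ok(())
}

fn main() -> anyhow::Result<()> {
    let shell = Shell::new()?;
    run_rust_lint(&shell, true)
}
```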
--------- Signed-off-by: Danil Co-authored-by: perekopskiy --- .../build-contract-verifier-template.yml | 9 +- .github/workflows/build-core-template.yml | 13 +- .github/workflows/build-local-node-docker.yml | 6 +- .github/workflows/ci-common-reusable.yml | 7 +- .github/workflows/ci-core-reusable.yml | 511 ++++++++++-------- .github/workflows/ci-prover-reusable.yml | 15 +- .github/workflows/ci-zk-toolbox-reusable.yml | 346 ------------ .github/workflows/ci.yml | 14 +- .github/workflows/vm-perf-comparison.yml | 14 +- .github/workflows/vm-perf-to-prometheus.yml | 6 +- .../system-constants-generator/src/utils.rs | 7 +- core/lib/contracts/src/lib.rs | 2 +- .../contracts/zkasm/deep_stak.zkasm | 35 +- core/tests/ts-integration/hardhat.config.ts | 9 +- core/tests/ts-integration/package.json | 6 +- .../ts-integration/scripts/compile-yul.ts | 14 +- .../tests/api/contract-verification.test.ts | 42 +- .../ts-integration/tests/api/debug.test.ts | 4 +- docker/external-node/Dockerfile | 2 + docker/server-v2/Dockerfile | 2 + .../building-from-scratch/Dockerfile | 10 +- prover/crates/bin/prover_cli/src/cli.rs | 2 + .../prover_cli/src/commands/status/batch.rs | 50 +- yarn.lock | 124 +++-- .../commands/contract_verifier/args/init.rs | 55 +- .../src/commands/contract_verifier/init.rs | 14 + .../crates/zk_inception/src/messages.rs | 3 + .../zk_supervisor/src/commands/clean/mod.rs | 18 + .../zk_supervisor/src/commands/test/db.rs | 34 ++ .../src/commands/test/integration.rs | 2 +- .../zk_supervisor/src/commands/test/mod.rs | 3 +- .../zk_supervisor/src/commands/test/prover.rs | 20 +- .../zk_supervisor/src/commands/test/rust.rs | 38 +- 33 files changed, 610 insertions(+), 827 deletions(-) delete mode 100644 .github/workflows/ci-zk-toolbox-reusable.yml create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index db7c4ba387f4..bb385b2797b2 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -113,18 +113,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk || true - ci_run yarn zk build + ci_run ./bin/zkt || true + ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | - ci_run zk run yarn ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build - ci_run zk f yarn run l2-contracts build + ci_run zk_supervisor contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 7e5dcc10a939..dc46c4ba95e9 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -71,11 +71,15 @@ jobs: if [ $(jq length <<<"$tags") -eq 0 ]; then echo "No tag found on all pages." 
echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") if [[ ! -z "$filtered_tag" ]]; then echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/out break fi ((page++)) @@ -122,18 +126,15 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk || true - ci_run yarn zk build + ci_run ./bin/zk || true + ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | - ci_run zk run yarn ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build - ci_run zk f yarn run l2-contracts build + ci_run zk_supervisor contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index 7f36f28f2864..f664bfaaa00a 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -16,7 +16,7 @@ on: jobs: build-images: name: Local Node - Build and Push Docker Image - runs-on: [matterlabs-ci-runner-high-performance] + runs-on: [ matterlabs-ci-runner-high-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: @@ -61,9 +61,9 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk + ci_run zkt ci_run cp etc/tokens/{test,localhost}.json - ci_run zk compiler all - ci_run zk contract build + ci_run zk_supervisor contracts - name: update-image run: | diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index d4667a273ef4..d2f9e11348f0 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -28,11 +28,10 @@ jobs: - name: Init run: | - ci_run zk - ci_run run_retried rustup show - ci_run zk db setup + ci_run zkt # This does both linting and "building". We're using `zk lint prover` as it's common practice within our repo # `zk lint prover` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints - run: ci_run zk lint prover + run: ci_run zk_supervisor lint -t rs --check + diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index a88a8fe3944e..898085a36784 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -145,304 +145,341 @@ jobs: ci_run sccache --show-stats || true ci_run cat /tmp/sccache_log.txt || true - integration: - name: Integration (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }}) - strategy: - # In matrix jobs, fail-fast is true by default. - # To be consistent with the rest of the workflow we disable it explicitly. 
- fail-fast: false - matrix: - consensus: [ false, true ] - base_token: [ "Eth", "Custom" ] - deployment_mode: [ "Rollup", "Validium" ] - env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" - - runs-on: [ matterlabs-ci-runner-highmem-long ] + integration-tests: + runs-on: [ matterlabs-ci-runner-ultra-performance ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 with: submodules: "recursive" fetch-depth: 0 + - name: Setup environment run: | echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env - echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env - echo ZKSYNC_DEBUG_LOGS=true >> .env - - - name: Download zksolc/solc and zkvyper/vyper - run: | - sudo apt update && sudo apt install wget -y - - mkdir -p $(pwd)/etc/solc-bin/0.8.23 - wget https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.23%2Bcommit.f704f362 - mv solc-linux-amd64-v0.8.23+commit.f704f362 $(pwd)/etc/solc-bin/0.8.23/solc - chmod +x $(pwd)/etc/solc-bin/0.8.23/solc - - mkdir -p $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0 - wget https://github.com/matter-labs/era-solidity/releases/download/0.8.23-1.0.0/solc-linux-amd64-0.8.23-1.0.0 -O $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0/solc - chmod +x $(pwd)/etc/solc-bin/zkVM-0.8.23-1.0.0/solc - - mkdir -p $(pwd)/etc/zksolc-bin/v1.3.21 - wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-v1.3.21 - mv zksolc-linux-amd64-musl-v1.3.21 $(pwd)/etc/zksolc-bin/v1.3.21/zksolc - chmod +x $(pwd)/etc/zksolc-bin/v1.3.21/zksolc - - mkdir -p $(pwd)/etc/vyper-bin/0.3.10 - wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux - mv vyper0.3.10 $(pwd)/etc/vyper-bin/0.3.10/vyper - chmod +x $(pwd)/etc/vyper-bin/0.3.10/vyper - - mkdir -p $(pwd)/etc/zkvyper-bin/v1.3.13 - wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-v1.3.13 - mv zkvyper-linux-amd64-musl-v1.3.13 $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper - chmod +x $(pwd)/etc/zkvyper-bin/v1.3.13/zkvyper + echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - ci_run pre_download_compilers.sh ci_run sccache --start-server - - name: Init + - name: Build zk_toolbox + run: ci_run bash -c "./bin/zkt" + + - name: Create log directories + run: | + SERVER_LOGS_DIR=logs/server + INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests + INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en + SNAPSHOT_RECOVERY_LOGS_DIR=logs/snapshot_recovery/ + GENESIS_RECOVERY_LOGS_DIR=logs/genesis_recovery/ + EXTERNAL_NODE_LOGS_DIR=logs/external_node + REVERT_LOGS_DIR=logs/revert + + mkdir -p $SERVER_LOGS_DIR + mkdir -p $INTEGRATION_TESTS_LOGS_DIR + mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR + mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR + mkdir -p $GENESIS_RECOVERY_LOGS_DIR + mkdir -p $EXTERNAL_NODE_LOGS_DIR + mkdir -p $REVERT_LOGS_DIR + + echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV + echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV + echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV + echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV + echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> 
$GITHUB_ENV + echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV + echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV + + - name: Initialize ecosystem run: | ci_run git config --global --add safe.directory /usr/src/zksync - ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk - ci_run run_retried rustup show - if [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ci_run zk env dev_validium_docker - ci_run zk config compile dev_validium_docker - else - ci_run zk config compile - fi - ci_run zk init ${{ matrix.base_token == 'Custom' && '--base-token-name BAT' || ''}} ${{ matrix.deployment_mode == 'Validium' && '--validium-mode' || ''}} - # `sleep 5` because we need to wait until server started properly - - name: Run server + ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ + --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_era \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_era \ + --ignore-prerequisites --verbose \ + --observability=false + + - name: Read Custom Token address and set as environment variable run: | - ci_run zk server --components=$SERVER_COMPONENTS &>server.log & - ci_run sleep 5 + address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml) + echo "address=$address" + echo "address=$address" >> $GITHUB_ENV - - name: Run contract verifier + - name: Create and initialize Validium chain run: | - ci_run zk contract_verifier &>contract_verifier.log & - ci_run sleep 2 - - - name: Server integration tests - run: ci_run zk test i server - - - name: Snapshot recovery test - # We use `yarn` directly because the test launches `zk` commands in both server and EN envs. - # An empty topmost environment helps avoid a mess when redefining env vars shared between both envs - # (e.g., DATABASE_URL). - # - # Since `base_token` doesn't meaningfully influence the test, we use it as a flag for - # enabling / disabling tree during pruning. 
+ ci_run zk_inception chain create \ + --chain-name validium \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_validium \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_validium \ + --port-offset 2000 \ + --chain validium + + - name: Create and initialize chain with Custom Token run: | - if [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ci_run zk config compile ext-node-validium - ci_run zk config compile ext-node-validium-docker - fi - ENABLE_CONSENSUS=${{ matrix.consensus }} \ - DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - SNAPSHOTS_CREATOR_VERSION=${{ matrix.deployment_mode == 'Validium' && '0' || '1' }} \ - DISABLE_TREE_DURING_PRUNING=${{ matrix.base_token == 'Eth' }} \ - ETH_CLIENT_WEB3_URL="http://localhost:8545" \ - PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,DISABLE_TREE_DURING_PRUNING,SNAPSHOTS_CREATOR_VERSION,ETH_CLIENT_WEB3_URL" \ - ci_run yarn recovery-test snapshot-recovery-test - - - name: Genesis recovery test + ci_run zk_inception chain create \ + --chain-name custom_token \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address ${{ env.address }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_custom_token \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_custom_token \ + --port-offset 3000 \ + --chain custom_token + + - name: Create and register chain with transactions signed "offline" run: | - ENABLE_CONSENSUS=${{ matrix.consensus }} \ - DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - ETH_CLIENT_WEB3_URL="http://localhost:8545" \ - PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE,ETH_CLIENT_WEB3_URL" \ - ci_run yarn recovery-test genesis-recovery-test + ci_run zk_inception chain create \ + --chain-name offline_chain \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites - - name: Fee projection tests - run: ci_run zk test i fees + ci_run zk_inception chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 - - name: Run revert test - run: | - ci_run pkill zksync_server || true - ci_run sleep 2 - ENABLE_CONSENSUS=${{ matrix.consensus }} DEPLOYMENT_MODE=${{ matrix.deployment_mode }} PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" ci_run zk test i revert + governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' 
./configs/wallets.yaml) + ci_run zk_supervisor send-transactions \ + --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ + --l1-rpc-url http://127.0.0.1:8545 \ + --private-key $governor_pk - # This test should be the last one as soon as it - # finished bootloader will be different - - name: Run upgrade test - run: | - ci_run pkill zksync_server || true - ci_run sleep 10 - ci_run zk test i upgrade + bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml) + chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml) - - name: Show server.log logs - if: always() - run: ci_run cat server.log || true + hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id) - - name: Show contract_verifier.log logs - if: always() - run: ci_run cat contract_verifier.log || true + if [[ $hyperchain_output == 0x* && ${#hyperchain_output} -eq 66 ]]; then + echo "Chain successfully registered: $hyperchain_output" + else + echo "Failed to register chain: $hyperchain_output" + exit 1 + fi - - name: Show snapshot-creator.log logs - if: always() - run: ci_run cat core/tests/recovery-test/snapshot-creator.log || true - - name: Show snapshot-recovery.log logs - if: always() - run: ci_run cat core/tests/recovery-test/snapshot-recovery.log || true - - name: Show genesis-recovery.log logs - if: always() - run: ci_run cat core/tests/recovery-test/genesis-recovery.log || true + - name: Create and initialize Consensus chain + run: | + ci_run zk_inception chain create \ + --chain-name consensus \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address ${{ env.address }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zk_inception chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_consensus \ + --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --prover-db-name=zksync_prover_localhost_consensus \ + --port-offset 4000 \ + --chain consensus + + - name: Build test dependencies + run: | + ci_run zk_supervisor test build - - name: Show revert.log logs - if: always() - run: ci_run cat logs/revert/default/server.log || true + - name: Initialize Contract verifier + run: | + ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.18-1.0.1 --only --chain era + ci_run zk_inception contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & - - name: Show upgrade.log logs - if: always() - run: ci_run cat core/tests/upgrade-test/upgrade.log || true + - name: Run servers + run: | + ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zk_inception server --ignore-prerequisites --chain consensus \ + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ 
env.SERVER_LOGS_DIR }}/consensus.log & - - name: Show fee-projection.log logs - if: always() - run: ci_run cat core/tests/ts-integration/fees.log || true + ci_run sleep 5 - - name: Show sccache logs - if: always() + - name: Run integration tests run: | - ci_run sccache --show-stats || true - ci_run cat /tmp/sccache_log.txt || true + PASSED_ENV_VARS="RUN_CONTRACT_VERIFICATION_TEST" \ + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & + PID1=$! - external-node: - name: External node (consensus=${{ matrix.consensus }}, base_token=${{ matrix.base_token }}, deployment_mode=${{ matrix.deployment_mode }}) - strategy: - fail-fast: false - matrix: - consensus: [ false, true ] - base_token: [ "Eth", "Custom" ] - deployment_mode: [ "Rollup", "Validium" ] - runs-on: [ matterlabs-ci-runner-highmem-long ] + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & + PID2=$! - env: - SERVER_COMPONENTS: "api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,da_dispatcher${{ matrix.consensus && ',consensus' || '' }}${{ matrix.base_token == 'Custom' && ',base_token_ratio_persister' || '' }}" - EXT_NODE_FLAGS: "${{ matrix.consensus && '-- --enable-consensus' || '' }}" + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & + PID3=$! + + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & + PID4=$! - steps: - - name: Checkout code # Checks out the repository under $GITHUB_WORKSPACE, so the job can access it. 
- uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: "recursive" - fetch-depth: 0 + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 - - name: Setup environment + - name: Init external nodes run: | - echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - echo $(pwd)/bin >> $GITHUB_PATH - echo IN_DOCKER=1 >> .env - echo RUN_CONTRACT_VERIFICATION_TEST=true >> .env - echo ZKSYNC_DEBUG_LOGS=true >> .env + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era + ci_run zk_inception external-node init --ignore-prerequisites --chain era + + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium + ci_run zk_inception external-node init --ignore-prerequisites --chain validium - - name: Start services - run: | - ci_localnet_up - ci_run pre_download_compilers.sh - ci_run sccache --start-server + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token + ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token + + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus + ci_run zk_inception external-node init --ignore-prerequisites --chain consensus - - name: Init + - name: Run recovery tests (from snapshot) run: | - ci_run git config --global --add safe.directory /usr/src/zksync - ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen - ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk - ci_run run_retried rustup show - if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - ci_run zk config compile - elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ci_run zk env dev_validium_docker - ci_run zk config compile dev_validium_docker - fi - ci_run zk init ${{ matrix.base_token == 'Custom' && '--base-token-name BAT' || ''}} ${{ matrix.deployment_mode == 'Validium' && '--validium-mode' || ''}} - # `sleep 30` because we need to wait until server started properly - - name: Run server - run: | - ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - ci_run sleep 30 + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & + PID1=$! + + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & + PID2=$! + + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & + PID3=$! + + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & + PID4=$! 
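+ # Note: the four recovery suites above are intentionally backgrounded so all
+ # chains recover in parallel; each `wait $PID` below returns that suite's exit
+ # status, and because GitHub Actions runs `run:` steps with `bash -e`, any
+ # failing suite fails the whole step. A minimal sketch of the same
+ # fan-out/fan-in pattern (with a hypothetical `run_suite` command):
+ #   run_suite --chain era &> era.log & PID=$!
+ #   wait $PID   # non-zero if run_suite failed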
+ + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 - - name: Run external node + - name: Run recovery tests (from genesis) run: | - if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - ci_run zk env ext-node-docker - elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ci_run zk env ext-node-validium-docker - fi - ci_run zk db setup - ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - ci_run sleep 30 + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & + PID1=$! - - name: Integration tests - run: ci_run zk test i server --testPathIgnorePatterns 'contract-verification|snapshots-creator' + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & + PID2=$! - - name: Run revert test - run: | - ENABLE_CONSENSUS=${{ matrix.consensus }} \ - DEPLOYMENT_MODE=${{ matrix.deployment_mode }} \ - PASSED_ENV_VARS="ENABLE_CONSENSUS,DEPLOYMENT_MODE" \ - ci_run zk test i revert-en + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & + PID3=$! - # test terminates the nodes, so we restart them. - if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - ZKSYNC_ENV=docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - ZKSYNC_ENV=ext-node-docker ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ZKSYNC_ENV=dev_validium_docker ci_run zk server --components=$SERVER_COMPONENTS &>>server.log & - ZKSYNC_ENV=ext-node-validium-docker ci_run zk external-node $EXT_NODE_FLAGS &>>ext-node.log & - fi - ci_run sleep 30 + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & + PID4=$! - - name: Run upgrade test + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 + + - name: Run external node server run: | - if [[ "${{ matrix.deployment_mode }}" == "Rollup" ]]; then - ci_run zk env docker - elif [[ "${{ matrix.deployment_mode }}" == "Validium" ]]; then - ci_run zk env dev_validium_docker - fi - CHECK_EN_URL="http://0.0.0.0:3060" ci_run zk test i upgrade + ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & + ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & - - name: Show server.log logs - if: always() - run: ci_run cat server.log || true + - name: Run integration tests en + run: | + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & + PID1=$! - - name: Show ext-node.log logs - if: always() - run: ci_run cat ext-node.log || true + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & + PID2=$! 
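+ # Note: `--external-node` runs the same integration suite against the external
+ # nodes brought up in the "Run external node server" step (per-chain logs go to
+ # INTEGRATION_TESTS_EN_LOGS_DIR), e.g.:
+ #   zk_supervisor test integration --external-node --chain era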
- - name: Show contract_verifier.log logs - if: always() - run: ci_run cat ext-node.log || true + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & + PID3=$! - - name: Show revert logs (main node) - if: always() - run: ci_run cat logs/revert/en/default/server.log || true + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & + PID4=$! - - name: Show revert logs (EN) - if: always() - run: ci_run cat logs/revert/en/default/external_node.log || true + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 - - name: Show upgrade.log logs - if: always() - run: ci_run cat core/tests/upgrade-test/upgrade.log || true + - name: Run revert tests + run: | + ci_run killall -INT zksync_server || true + ci_run killall -INT zksync_external_node || true + + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & + PID1=$! - - name: Show sccache logs - if: always() + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & + PID2=$! + + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & + PID3=$! + + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & + PID4=$! + + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 + + + # Upgrade tests should run last, because as soon as they + # finish the bootloader will be different + # TODO make upgrade tests safe to run multiple times + - name: Run upgrade test run: | - ci_run sccache --show-stats || true - ci_run cat /tmp/sccache_log.txt || true + ci_run zk_supervisor test upgrade --no-deps --chain era + + - name: Upload logs + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + if: always() + with: + name: logs + path: logs diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index d1d4a9ab96b2..ac971dafac99 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -3,7 +3,7 @@ on: workflow_call: jobs: lint: - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" @@ -17,6 +17,8 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV + echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV - name: Start services run: | @@ -27,14 +29,14 @@ jobs: - name: Init run: | - ci_run zk - ci_run zk db setup + ci_run zkt + ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting run: ci_run bash -c "cd prover && cargo fmt --check" unit-tests: - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] env: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" @@ -58,11 +60,10 @@ jobs: - name: Init run: | - ci_run zk + ci_run zkt ci_run run_retried 
rustup show - ci_run zk db setup - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server requires bellman-cuda to be present - ci_run zk test prover + ci_run zk_supervisor test prover diff --git a/.github/workflows/ci-zk-toolbox-reusable.yml b/.github/workflows/ci-zk-toolbox-reusable.yml deleted file mode 100644 index 98a44c443581..000000000000 --- a/.github/workflows/ci-zk-toolbox-reusable.yml +++ /dev/null @@ -1,346 +0,0 @@ -name: Workflow template for CI jobs for Core Components -on: - workflow_call: - -env: - CLICOLOR: 1 - # We run multiple binaries in parallel, and by default they will try to utilize all the - # available CPUs. In tests, there is not much CPU-intensive work (rayon), but a lot of - # async work (tokio), so we prioritize tokio. - TOKIO_WORKER_THREADS: 4 - RAYON_NUM_THREADS: 2 - -jobs: - lint: - name: lint - uses: ./.github/workflows/ci-core-lint-reusable.yml - - tests: - runs-on: [ matterlabs-ci-runner-ultra-performance ] - steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - with: - submodules: "recursive" - fetch-depth: 0 - - - name: Setup environment - run: | - echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - echo $(pwd)/bin >> $GITHUB_PATH - echo IN_DOCKER=1 >> .env - - - name: Start services - run: | - ci_localnet_up - ci_run sccache --start-server - - - name: Build zk_toolbox - run: ci_run bash -c "./bin/zkt" - - - name: Create log directories - run: | - SERVER_LOGS_DIR=logs/server - INTEGRATION_TESTS_LOGS_DIR=logs/integration_tests - INTEGRATION_TESTS_EN_LOGS_DIR=logs/integration_tests/en - SNAPSHOT_RECOVERY_LOGS_DIR=logs/integration_tests/en - GENESIS_RECOVERY_LOGS_DIR=logs/integration_tests/en - EXTERNAL_NODE_LOGS_DIR=logs/external_node - REVERT_LOGS_DIR=logs/revert - - mkdir -p $SERVER_LOGS_DIR - mkdir -p $INTEGRATION_TESTS_LOGS_DIR - mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR - mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR - mkdir -p $GENESIS_RECOVERY_LOGS_DIR - mkdir -p $EXTERNAL_NODE_LOGS_DIR - mkdir -p $REVERT_LOGS_DIR - - echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV - echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV - echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV - echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV - echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV - echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV - echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV - - - name: Initialize ecosystem - run: | - ci_run git config --global --add safe.directory /usr/src/zksync - ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - ci_run git config --global --add safe.directory /usr/src/zksync/contracts - - ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ - --ignore-prerequisites --verbose \ - --observability=false - - - name: Create and register chain with transactions signed "offline" - run: | - ci_run zk_inception chain create \ - --chain-name offline_chain \ - --chain-id sequential \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode rollup 
\ - --base-token-address 0x0000000000000000000000000000000000000001 \ - --base-token-price-nominator 1 \ - --base-token-price-denominator 1 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 - - governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml) - - ci_run zk_supervisor send-transactions \ - --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ - --l1-rpc-url http://127.0.0.1:8545 \ - --private-key $governor_pk - - bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml) - chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml) - - hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id) - - if [[ $hyperchain_output == 0x* && ${#hyperchain_output} -eq 66 ]]; then - echo "Chain successfully registered: $hyperchain_output" - else - echo "Failed to register chain: $hyperchain_output" - exit 1 - fi - - - name: Read Custom Token address and set as environment variable - run: | - address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml) - echo "address=$address" - echo "address=$address" >> $GITHUB_ENV - - - name: Create and initialize Validium chain - run: | - ci_run zk_inception chain create \ - --chain-name validium \ - --chain-id sequential \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode validium \ - --base-token-address 0x0000000000000000000000000000000000000001 \ - --base-token-price-nominator 1 \ - --base-token-price-denominator 1 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain init \ - --deploy-paymaster \ - --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ - --port-offset 2000 \ - --chain validium - - - name: Create and initialize chain with Custom Token - run: | - ci_run zk_inception chain create \ - --chain-name custom_token \ - --chain-id sequential \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode rollup \ - --base-token-address ${{ env.address }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain init \ - --deploy-paymaster \ - --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ - --port-offset 3000 \ - --chain custom_token - - - name: Create and initialize Consensus chain - run: | - ci_run zk_inception chain create \ - --chain-name consensus \ - --chain-id sequential \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode validium \ - --base-token-address ${{ env.address }} \ - --base-token-price-nominator 3 \ - --base-token-price-denominator 2 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain init \ - --deploy-paymaster \ - 
--l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_consensus \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_consensus \ - --port-offset 4000 \ - --chain consensus - - - name: Build test dependencies - run: | - ci_run zk_supervisor test build - - - name: Run servers - run: | - ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & - ci_run zk_inception server --ignore-prerequisites --chain consensus \ - --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ - &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & - - ci_run sleep 5 - - - name: Run integration tests - run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - - - name: Init external nodes - run: | - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - ci_run zk_inception external-node init --ignore-prerequisites --chain era - - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium - ci_run zk_inception external-node init --ignore-prerequisites --chain validium - - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token - ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token - - ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus - ci_run zk_inception external-node init --ignore-prerequisites --chain consensus - - - name: Run recovery tests (from snapshot) - run: | - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//validium.log & - PID2=$! 
- - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}//consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - - - name: Run recovery tests (from genesis) - run: | - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain era &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain validium &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - - - name: Run external node server - run: | - ci_run zk_inception external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & - ci_run zk_inception external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & - ci_run sleep 5 - - - name: Run integration tests en - run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain era &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain validium &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain custom_token &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --external-node --chain consensus &> ${{ env.INTEGRATION_TESTS_EN_LOGS_DIR }}/consensus.log & - PID4=$! - - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - - - name: Run revert tests - run: | - ci_run killall -INT zksync_server || true - ci_run killall -INT zksync_external_node || true - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain validium &> ${{ env.REVERT_LOGS_DIR }}/validium.log & - PID2=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain custom_token &> ${{ env.REVERT_LOGS_DIR }}/custom_token.log & - PID3=$! - - ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & - PID4=$! 
- - wait $PID1 - wait $PID2 - wait $PID3 - wait $PID4 - - # Upgrade tests should run last, because as soon as they - # finish the bootloader will be different - # TODO make upgrade tests safe to run multiple times - - name: Run upgrade test - run: | - ci_run zk_supervisor test upgrade --no-deps --chain era - - - name: Upload logs - uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 - if: always() - with: - name: logs - path: logs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index e05b84cda971..291f9237ac52 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -58,14 +58,10 @@ jobs: - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - '!**/*.md' - - '!**/*.MD' - - 'docker-compose.yml' - zk_toolbox: - - '.github/workflows/ci-zk-toolbox-reusable.yml' - 'zk_toolbox/**' - '!**/*.md' - '!**/*.MD' + - 'docker-compose.yml' docs: - '**/*.md' - '**/*.MD' @@ -98,12 +94,6 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml - ci-for-zk-toolbox: - needs: changed_files - if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - name: CI for zk_toolbox - uses: ./.github/workflows/ci-zk-toolbox-reusable.yml - ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' @@ -187,7 +177,7 @@ jobs: name: Github Status Check runs-on: ubuntu-latest if: always() && !cancelled() - needs: [ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-core-images, build-contract-verifier, build-prover-images] + needs: [ ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-core-images, build-contract-verifier, build-prover-images ] steps: - name: Status run: | diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index cfcfff93037f..cd6f490b3368 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -8,7 +8,7 @@ on: jobs: vm-benchmarks: name: Run VM benchmarks - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] steps: - name: checkout base branch @@ -44,20 +44,20 @@ jobs: - name: run benchmarks on base branch shell: bash run: | - ci_run zk - ci_run zk compiler system-contracts + ci_run zkt + ci_run zk_supervisor contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes - ci_run yarn workspace system-contracts clean - name: checkout PR - run: git checkout --force FETCH_HEAD --recurse-submodules + run: | + git checkout --force FETCH_HEAD --recurse-submodules - name: run benchmarks on PR shell: bash run: | - ci_run zk - ci_run zk compiler system-contracts + ci_run zkt + ci_run zk_supervisor contracts --system-contracts ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4d90b2a24ebb..d201949b3ebe 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -12,7 +12,7 @@ concurrency: vm-benchmarks jobs: 
vm-benchmarks: name: Run VM benchmarks - runs-on: [matterlabs-ci-runner-highmem-long] + runs-on: [ matterlabs-ci-runner-highmem-long ] steps: - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 @@ -30,8 +30,8 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zk - ci_run zk compiler all + ci_run zkt + ci_run zks contracts all - name: run benchmarks run: | diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 3775b3c0e243..e596208a7949 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -2,7 +2,7 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ - load_sys_contract, read_bootloader_code, read_sys_contract_bytecode, read_zbin_bytecode, + load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ @@ -169,9 +169,8 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test + read_bytecode_from_path(format!( + "contracts/system-contracts/zkout/{test}.yul/contracts-preprocessed/bootloader/{test}.yul.json", )) } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index f57649c9d695..a60d9fbf1813 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -211,7 +211,7 @@ pub fn known_codes_contract() -> Contract { } /// Reads bytecode from a given path. -fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { +pub fn read_bytecode_from_path(artifact_path: impl AsRef + std::fmt::Debug) -> Vec { let artifact = read_file_to_json_value(&artifact_path); let bytecode = artifact["bytecode"] diff --git a/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm b/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm index bdfb4e70de7c..4cdf8542cab7 100644 --- a/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm +++ b/core/tests/ts-integration/contracts/zkasm/deep_stak.zkasm @@ -1,26 +1,25 @@ .text - .file "loop.yul" - .globl __entry + .file "loop.yul" + .globl __entry __entry: .func_begin0: - and! 1, r2, r1 - jump.ne @.BB0_2 + and! 
1, r2, r1 + jump.ne @.BB0_2 __LOOP: - near_call r0, @__LOOP, @__LOOP + call r0, @__LOOP, @__LOOP .BB0_1: - add 1, r0, r2 - ret + add 1, r0, r2 + ret .BB0_2: - add 32, r0, r1 - st.2 256, r1 - st.2 288, r0 - add @CPI0_0[0], r0, r1 - ret.ok.to_label r1, @DEFAULT_FAR_RETURN + add 32, r0, r1 + stm.ah 256, r1 + stm.ah 288, r0 + add code[@CPI0_0], r0, r1 + retl r1, @DEFAULT_FAR_RETURN .func_end0: - -.func_end1: - - .note.GNU-stack - .rodata +;; landing pad for returns +DEFAULT_FAR_RETURN: + retl @DEFAULT_FAR_RETURN + .rodata CPI0_0: - .cell 53919893334301279589334030174039261352344891250716429051063678533632 \ No newline at end of file + .cell 53919893334301279589334030174039261352344891250716429051063678533632 diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 00abe2b32efb..42ee6bacf7ad 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -4,14 +4,14 @@ import '@matterlabs/hardhat-zksync-vyper'; export default { zksolc: { - version: '1.3.21', + version: '1.5.3', compilerSource: 'binary', settings: { - isSystem: true + enableEraVMExtensions: true } }, zkvyper: { - version: '1.3.13', + version: '1.5.4', compilerSource: 'binary' }, networks: { @@ -20,7 +20,8 @@ export default { } }, solidity: { - version: '0.8.23' + version: '0.8.18', + eraVersion: '1.0.1' }, vyper: { version: '0.3.10' diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 0e9b863d8e16..9d53420edaad 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -13,9 +13,9 @@ "build-yul": "hardhat run scripts/compile-yul.ts" }, "devDependencies": { - "@matterlabs/hardhat-zksync-deploy": "^1.3.0", - "@matterlabs/hardhat-zksync-solc": "^1.1.4", - "@matterlabs/hardhat-zksync-vyper": "^1.0.8", + "@matterlabs/hardhat-zksync-deploy": "^1.5.0", + "@matterlabs/hardhat-zksync-solc": "^1.2.4", + "@matterlabs/hardhat-zksync-vyper": "^1.1.0", "@nomiclabs/hardhat-vyper": "^3.0.6", "@types/jest": "^29.0.3", "@types/node": "^18.19.15", diff --git a/core/tests/ts-integration/scripts/compile-yul.ts b/core/tests/ts-integration/scripts/compile-yul.ts index dda65456a6c6..876caacdfab3 100644 --- a/core/tests/ts-integration/scripts/compile-yul.ts +++ b/core/tests/ts-integration/scripts/compile-yul.ts @@ -7,7 +7,7 @@ import { getZksolcUrl, saltFromUrl } from '@matterlabs/hardhat-zksync-solc'; import { getCompilersDir } from 'hardhat/internal/util/global-dir'; import path from 'path'; -const COMPILER_VERSION = '1.3.21'; +const COMPILER_VERSION = '1.5.3'; const IS_COMPILER_PRE_RELEASE = false; async function compilerLocation(): Promise { @@ -48,18 +48,24 @@ export async function compile( } let paths = preparePaths(pathToHome, path, files, outputDirName); - let systemMode = type === 'yul' ? '--system-mode --optimization 3' : ''; + let eraVmExtensions = type === 'yul' ? 
'--enable-eravm-extensions --optimization 3' : ''; const zksolcLocation = await compilerLocation(); await spawn( - `${zksolcLocation} ${paths.absolutePathSources}/${paths.outputDir} ${systemMode} --${type} --bin --overwrite -o ${paths.absolutePathArtifacts}/${paths.outputDir}` + `${zksolcLocation} ${paths.absolutePathSources}/${paths.outputDir} ${eraVmExtensions} --${type} --bin --overwrite -o ${paths.absolutePathArtifacts}/${paths.outputDir}` ); } export async function compileFolder(pathToHome: string, path: string, type: string) { + let compilationMode; + if (type === 'zkasm') { + compilationMode = 'eravm-assembly'; + } else { + compilationMode = type; + } let files: string[] = (await fs.promises.readdir(path)).filter((fn) => fn.endsWith(`.${type}`)); for (const file of files) { - await compile(pathToHome, path, [file], `${file}`, type); + await compile(pathToHome, path, [file], `${file}`, compilationMode); } } diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index c0cd887bcf7d..77ab2e6ddfcb 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -10,11 +10,11 @@ import { NodeMode } from '../../src/types'; // Regular expression to match ISO dates. const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; -const ZKSOLC_VERSION = 'v1.3.21'; -const SOLC_VERSION = '0.8.23'; -const ZK_VM_SOLC_VERSION = 'zkVM-0.8.23-1.0.0'; +const ZKSOLC_VERSION = 'v1.5.3'; +const SOLC_VERSION = '0.8.26'; +const ZK_VM_SOLC_VERSION = 'zkVM-0.8.18-1.0.1'; -const ZKVYPER_VERSION = 'v1.3.13'; +const ZKVYPER_VERSION = 'v1.5.4'; const VYPER_VERSION = '0.3.10'; type HttpMethod = 'POST' | 'GET'; @@ -54,32 +54,6 @@ describe('Tests for the contract verification API', () => { const counterContract = await deployContract(alice, contracts.counter, []); const constructorArguments = counterContract.interface.encodeDeploy([]); - const requestBody = { - contractAddress: await counterContract.getAddress(), - contractName: 'contracts/counter/counter.sol:Counter', - sourceCode: getContractSource('counter/counter.sol'), - compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, - optimizationUsed: true, - constructorArguments, - isSystem: true - }; - let requestId = await query('POST', '/contract_verification', undefined, requestBody); - - await expectVerifyRequestToSucceed(requestId, requestBody); - }); - - test('should test zkVM solc contract verification', async () => { - let artifact = contracts.counter; - // TODO: use plugin compilation when it's ready instead of pre-compiled bytecode. 
- artifact.bytecode = fs.readFileSync( - `${testMaster.environment().pathToHome}/core/tests/ts-integration/contracts/counter/zkVM_bytecode.txt`, - 'utf8' - ); - - const counterContract = await deployContract(alice, artifact, []); - const constructorArguments = counterContract.interface.encodeDeploy([]); - const requestBody = { contractAddress: await counterContract.getAddress(), contractName: 'contracts/counter/counter.sol:Counter', @@ -127,7 +101,7 @@ describe('Tests for the contract verification API', () => { sourceCode: standardJsonInput, codeFormat: 'solidity-standard-json-input', compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, + compilerSolcVersion: ZK_VM_SOLC_VERSION, optimizationUsed: true, constructorArguments }; @@ -144,8 +118,8 @@ describe('Tests for the contract verification API', () => { const bytecodePath = `${ testMaster.environment().pathToHome - }/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/Empty.yul.zbin`; - const bytecode = fs.readFileSync(bytecodePath); + }/core/tests/ts-integration/contracts/yul/artifacts/Empty.yul/yul/Empty.yul.zbin`; + const bytecode = fs.readFileSync(bytecodePath, 'utf8'); const contractFactory = new zksync.ContractFactory([], bytecode, alice); const deployTx = await contractFactory.deploy(); @@ -157,7 +131,7 @@ describe('Tests for the contract verification API', () => { sourceCode, codeFormat: 'yul-single-file', compilerZksolcVersion: ZKSOLC_VERSION, - compilerSolcVersion: SOLC_VERSION, + compilerSolcVersion: ZK_VM_SOLC_VERSION, optimizationUsed: true, constructorArguments: '0x', isSystem: true diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index dd1ea141a419..054aa57cf64e 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -29,8 +29,8 @@ describe('Debug methods', () => { test('Should not fail for infinity recursion', async () => { const bytecodePath = `${ testMaster.environment().pathToHome - }/core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin`; - const bytecode = fs.readFileSync(bytecodePath); + }/core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/zkasm/deep_stak.zkasm.zbin`; + const bytecode = fs.readFileSync(bytecodePath, 'utf-8'); const contractFactory = new zksync.ContractFactory([], bytecode, testMaster.mainAccount()); const deployTx = await contractFactory.deploy(); diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index dc989f9ba4e7..7d276941dc42 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -18,6 +18,8 @@ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-co COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +# TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 
e5d378c3b6d8..07611a1d7b4d 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -24,6 +24,8 @@ COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-co COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +# TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile index da098df91d51..5b015a4545b7 100644 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ b/docs/guides/external-node/building-from-scratch/Dockerfile @@ -11,8 +11,7 @@ ENV ZKSYNC_HOME=/usr/src/zksync/zksync-era ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" # build zk tool -RUN zk -RUN yarn zk build +RUN zkt # build rust RUN cargo build --release @@ -20,12 +19,7 @@ RUN cp target/release/zksync_external_node /usr/bin # build contracts RUN git submodule update --init --recursive -RUN zk run yarn -RUN zk compiler all || true -RUN rm /root/.cache/hardhat-nodejs/compilers-v2/linux-amd64/solc-*.does.not.work || true -RUN zk compiler all -RUN zk contract build -RUN zk f yarn run l2-contracts build +RUN zk_supervisor contracts # copy migrations (node expects them to be in specific directory) RUN cp -r core/lib/dal/migrations/ migrations diff --git a/prover/crates/bin/prover_cli/src/cli.rs b/prover/crates/bin/prover_cli/src/cli.rs index 41ef94980056..de5d2d2525b4 100644 --- a/prover/crates/bin/prover_cli/src/cli.rs +++ b/prover/crates/bin/prover_cli/src/cli.rs @@ -44,6 +44,8 @@ pub struct ProverCLIConfig { env("PLI__DB_URL") )] pub db_url: SensitiveUrl, + #[clap(default_value = "10")] + pub max_failure_attempts: u32, } #[derive(Subcommand)] diff --git a/prover/crates/bin/prover_cli/src/commands/status/batch.rs b/prover/crates/bin/prover_cli/src/commands/status/batch.rs index 797695b02278..dc63f6bf837c 100644 --- a/prover/crates/bin/prover_cli/src/commands/status/batch.rs +++ b/prover/crates/bin/prover_cli/src/commands/status/batch.rs @@ -4,8 +4,6 @@ use anyhow::Context as _; use circuit_definitions::zkevm_circuits::scheduler::aux::BaseLayerCircuitType; use clap::Args as ClapArgs; use colored::*; -use zksync_config::configs::FriProverConfig; -use zksync_env_config::FromEnv; use zksync_prover_dal::{Connection, ConnectionPool, Prover, ProverDal}; use zksync_types::{ basic_fri_types::AggregationRound, @@ -57,9 +55,9 @@ pub(crate) async fn run(args: Args, config: ProverCLIConfig) -> anyhow::Result<( } if !args.verbose { - display_batch_status(batch_data); + display_batch_status(batch_data, config.max_failure_attempts); } else { - display_batch_info(batch_data); + display_batch_info(batch_data, config.max_failure_attempts); } } @@ -200,19 +198,19 @@ async fn get_proof_compression_job_info_for_batch<'a>( .await } -fn display_batch_status(batch_data: BatchData) { - display_status_for_stage(batch_data.basic_witness_generator); - display_status_for_stage(batch_data.leaf_witness_generator); - 
display_status_for_stage(batch_data.node_witness_generator); - display_status_for_stage(batch_data.recursion_tip_witness_generator); - display_status_for_stage(batch_data.scheduler_witness_generator); - display_status_for_stage(batch_data.compressor); +fn display_batch_status(batch_data: BatchData, max_failure_attempts: u32) { + display_status_for_stage(batch_data.basic_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.leaf_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.node_witness_generator, max_failure_attempts); + display_status_for_stage( + batch_data.recursion_tip_witness_generator, + max_failure_attempts, + ); + display_status_for_stage(batch_data.scheduler_witness_generator, max_failure_attempts); + display_status_for_stage(batch_data.compressor, max_failure_attempts); } -fn display_status_for_stage(stage_info: StageInfo) { - let max_attempts = FriProverConfig::from_env() - .expect("Fail to read prover config.") - .max_attempts; +fn display_status_for_stage(stage_info: StageInfo, max_attempts: u32) { display_aggregation_round(&stage_info); let status = stage_info.witness_generator_jobs_status(max_attempts); match status { @@ -231,19 +229,19 @@ fn display_status_for_stage(stage_info: StageInfo) { } } -fn display_batch_info(batch_data: BatchData) { - display_info_for_stage(batch_data.basic_witness_generator); - display_info_for_stage(batch_data.leaf_witness_generator); - display_info_for_stage(batch_data.node_witness_generator); - display_info_for_stage(batch_data.recursion_tip_witness_generator); - display_info_for_stage(batch_data.scheduler_witness_generator); - display_info_for_stage(batch_data.compressor); +fn display_batch_info(batch_data: BatchData, max_failure_attempts: u32) { + display_info_for_stage(batch_data.basic_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.leaf_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.node_witness_generator, max_failure_attempts); + display_info_for_stage( + batch_data.recursion_tip_witness_generator, + max_failure_attempts, + ); + display_info_for_stage(batch_data.scheduler_witness_generator, max_failure_attempts); + display_info_for_stage(batch_data.compressor, max_failure_attempts); } -fn display_info_for_stage(stage_info: StageInfo) { - let max_attempts = FriProverConfig::from_env() - .expect("Fail to read prover config.") - .max_attempts; +fn display_info_for_stage(stage_info: StageInfo, max_attempts: u32) { display_aggregation_round(&stage_info); let status = stage_info.witness_generator_jobs_status(max_attempts); match status { diff --git a/yarn.lock b/yarn.lock index f400104b9c20..b70e64f148a1 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1703,20 +1703,20 @@ chalk "4.1.2" ts-morph "^19.0.0" -"@matterlabs/hardhat-zksync-deploy@^1.3.0": - version "1.3.0" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.3.0.tgz#5c2b723318ddf6c4d3929ec225401864ff54557a" - integrity sha512-4UHOgOwIBC4JA3W8DE9GHqbAuBhCPAjtM+Oew1aiYYGkIsPUAMYsH35+4I2FzJsYyE6mD6ATmoS/HfZweQHTlQ== +"@matterlabs/hardhat-zksync-deploy@^1.5.0": + version "1.5.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-deploy/-/hardhat-zksync-deploy-1.5.0.tgz#40cb454fb187da4bb354f3acb48762a6657fcb36" + integrity sha512-7LAgYYwoKWHeR+3CyWEvA3NKBKtt7ktcr7SX6ZPgbEYqHAdXH02vxJZGwNADtMWpyYm8h+fEQkpPIgErD4NhmA== dependencies: - "@matterlabs/hardhat-zksync-solc" "^1.0.4" - chai "^4.3.6" - chalk "4.1.2" + 
"@matterlabs/hardhat-zksync-solc" "^1.2.0" + chai "^4.3.4" + chalk "^4.1.2" fs-extra "^11.2.0" - glob "^10.3.10" + glob "^10.4.1" lodash "^4.17.21" - sinon "^17.0.1" + sinon "^18.0.0" sinon-chai "^3.7.0" - ts-morph "^21.0.1" + ts-morph "^22.0.0" "@matterlabs/hardhat-zksync-node@^0.0.1-beta.7": version "0.0.1" @@ -1760,7 +1760,7 @@ chalk "4.1.2" dockerode "^3.3.4" -"@matterlabs/hardhat-zksync-solc@^1.0.4", "@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": +"@matterlabs/hardhat-zksync-solc@^1.0.5", "@matterlabs/hardhat-zksync-solc@^1.1.4": version "1.1.4" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.1.4.tgz#04a2fad6fb6b6944c64ad969080ee65b9af3f617" integrity sha512-4/usbogh9neewR2/v8Dn2OzqVblZMUuT/iH2MyPZgPRZYQlL4SlZtMvokU9UQjZT6iSoaKCbbdWESHDHSzfUjA== @@ -1794,6 +1794,23 @@ sinon-chai "^3.7.0" undici "^6.18.2" +"@matterlabs/hardhat-zksync-solc@^1.2.4": + version "1.2.4" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-solc/-/hardhat-zksync-solc-1.2.4.tgz#b14a1dbfe751058bf2d79eab747b87c7ca7d2361" + integrity sha512-9Nk95kxOZ9rl26trP/pXDLw5MqFAd0CD8FMTGDvA5HBGk6CL2wg4tS0gmucYz5R4qj09KUYOO4FW4rgd/atcGg== + dependencies: + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" + debug "^4.3.5" + dockerode "^4.0.2" + fs-extra "^11.2.0" + proper-lockfile "^4.1.2" + semver "^7.6.2" + sinon "^18.0.0" + sinon-chai "^3.7.0" + undici "^6.18.2" + "@matterlabs/hardhat-zksync-verify@^0.2.0": version "0.2.2" resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-verify/-/hardhat-zksync-verify-0.2.2.tgz#daa34bc4404096ed0f44461ee366c1cb0e5a4f2f" @@ -1824,20 +1841,20 @@ sinon "^18.0.0" sinon-chai "^3.7.0" -"@matterlabs/hardhat-zksync-vyper@^1.0.8": - version "1.0.8" - resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.0.8.tgz#d5bd496715a1e322b0bf3926b4146b4e18ab64ff" - integrity sha512-XR7rbfDuBG5/LZWYfhQTP9gD+U24hSJHDuZ9U55wgIfiQTOxPoztFwEbQNiC39vjT5MjP/Nv8/IDrlEBkaVCgw== +"@matterlabs/hardhat-zksync-vyper@^1.1.0": + version "1.1.0" + resolved "https://registry.yarnpkg.com/@matterlabs/hardhat-zksync-vyper/-/hardhat-zksync-vyper-1.1.0.tgz#b3fb304429e88a84b4abc3fe4e5a83b2f5e907bd" + integrity sha512-zDjHPeIuHRpumXiWZUbhoji4UJe09jTDRn4xnxsuVkLH7qLAm0VDFzCXYNMvEuySZSdhbSbekxJsH9Kunc5ycA== dependencies: - "@nomiclabs/hardhat-docker" "^2.0.0" - chai "^4.3.6" - chalk "4.1.2" + "@nomiclabs/hardhat-docker" "^2.0.2" + chai "^4.3.4" + chalk "^4.1.2" dockerode "^4.0.2" - fs-extra "^11.1.1" - semver "^7.5.4" - sinon "^17.0.1" + fs-extra "^11.2.0" + semver "^7.6.2" + sinon "^18.0.0" sinon-chai "^3.7.0" - undici "^5.14.0" + undici "^6.18.2" "@matterlabs/prettier-config@^1.0.3": version "1.0.3" @@ -2531,10 +2548,10 @@ mkdirp "^2.1.6" path-browserify "^1.0.1" -"@ts-morph/common@~0.22.0": - version "0.22.0" - resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.22.0.tgz#8951d451622a26472fbc3a227d6c3a90e687a683" - integrity sha512-HqNBuV/oIlMKdkLshXd1zKBqNQCsuPEsgQOkfFQ/eUKjRlwndXW1AjN9LVkBEIukm00gGXSRmfkl0Wv5VXLnlw== +"@ts-morph/common@~0.23.0": + version "0.23.0" + resolved "https://registry.yarnpkg.com/@ts-morph/common/-/common-0.23.0.tgz#bd4ddbd3f484f29476c8bd985491592ae5fc147e" + integrity sha512-m7Lllj9n/S6sOkCkRftpM7L24uvmfXQFedlW/4hENcuJH1HHm9u5EgxZb9uVjQSCGrbBWBkOGgcTxNg36r6ywA== dependencies: fast-glob "^3.3.2" minimatch "^9.0.3" @@ -4053,6 +4070,11 @@ code-block-writer@^12.0.0: resolved 
"https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-12.0.0.tgz#4dd58946eb4234105aff7f0035977b2afdc2a770" integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== +code-block-writer@^13.0.1: + version "13.0.2" + resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.2.tgz#e1c6c3dbe5d38b4ac76fb62c4d4b2fc4bf04c9c1" + integrity sha512-XfXzAGiStXSmCIwrkdfvc7FS5Dtj8yelCtyOf2p2skCAfvLd6zu0rGzuS9NSCO3bq1JKpFZ7tbKdKlcd5occQA== + collect-v8-coverage@^1.0.0: version "1.0.2" resolved "https://registry.yarnpkg.com/collect-v8-coverage/-/collect-v8-coverage-1.0.2.tgz#c0b29bcd33bcd0779a1344c2136051e6afd3d9e9" @@ -5925,16 +5947,17 @@ glob@8.1.0, glob@^8.0.3: minimatch "^5.0.1" once "^1.3.0" -glob@^10.3.10: - version "10.3.16" - resolved "https://registry.yarnpkg.com/glob/-/glob-10.3.16.tgz#bf6679d5d51279c8cfae4febe0d051d2a4bf4c6f" - integrity sha512-JDKXl1DiuuHJ6fVS2FXjownaavciiHNUU4mOvV/B793RLh05vZL1rcPnCSaOgv1hDT6RDlY7AB7ZUvFYAtPgAw== +glob@^10.4.1: + version "10.4.5" + resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" + integrity sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg== dependencies: foreground-child "^3.1.0" jackspeak "^3.1.2" - minimatch "^9.0.1" - minipass "^7.0.4" - path-scurry "^1.11.0" + minimatch "^9.0.4" + minipass "^7.1.2" + package-json-from-dist "^1.0.0" + path-scurry "^1.11.1" glob@^5.0.15: version "5.0.15" @@ -7951,13 +7974,20 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.1, minimatch@^9.0.3: +minimatch@^9.0.3: version "9.0.4" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== dependencies: brace-expansion "^2.0.1" +minimatch@^9.0.4: + version "9.0.5" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" + integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== + dependencies: + brace-expansion "^2.0.1" + minimatch@~3.0.4: version "3.0.8" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-3.0.8.tgz#5e6a59bd11e2ab0de1cfb843eb2d82e546c321c1" @@ -7970,11 +8000,16 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.0.4: +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": version "7.1.1" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== +minipass@^7.1.2: + version "7.1.2" + resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" + integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== + mkdirp-classic@^0.5.2: version "0.5.3" resolved "https://registry.yarnpkg.com/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz#fa10c9115cc6d8865be221ba47ee9bed78601113" @@ -8447,6 +8482,11 @@ p-try@^2.0.0: resolved "https://registry.yarnpkg.com/p-try/-/p-try-2.2.0.tgz#cb2868540e313d61de58fafbe35ce9004d5540e6" integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== +package-json-from-dist@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz#e501cd3094b278495eb4258d4c9f6d5ac3019f00" + integrity sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw== + parent-module@^1.0.0: version "1.0.1" resolved "https://registry.yarnpkg.com/parent-module/-/parent-module-1.0.1.tgz#691d2709e78c79fae3a156622452d00762caaaa2" @@ -8512,7 +8552,7 @@ path-parse@^1.0.6, path-parse@^1.0.7: resolved "https://registry.yarnpkg.com/path-parse/-/path-parse-1.0.7.tgz#fbc114b60ca42b30d9daf5858e4bd68bbedb6735" integrity sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw== -path-scurry@^1.11.0: +path-scurry@^1.11.1: version "1.11.1" resolved "https://registry.yarnpkg.com/path-scurry/-/path-scurry-1.11.1.tgz#7960a668888594a0720b12a911d1a742ab9f11d2" integrity sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA== @@ -10258,13 +10298,13 @@ ts-morph@^19.0.0: "@ts-morph/common" "~0.20.0" code-block-writer "^12.0.0" -ts-morph@^21.0.1: - version "21.0.1" - resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-21.0.1.tgz#712302a0f6e9dbf1aa8d9cf33a4386c4b18c2006" - integrity sha512-dbDtVdEAncKctzrVZ+Nr7kHpHkv+0JDJb2MjjpBaj8bFeCkePU9rHfMklmhuLFnpeq/EJZk2IhStY6NzqgjOkg== +ts-morph@^22.0.0: + version "22.0.0" + resolved "https://registry.yarnpkg.com/ts-morph/-/ts-morph-22.0.0.tgz#5532c592fb6dddae08846f12c9ab0fc590b1d42e" + integrity sha512-M9MqFGZREyeb5fTl6gNHKZLqBQA0TjA1lea+CR48R8EBTDuWrNqW6ccC5QvjNR4s6wDumD3LTCjOFSp9iwlzaw== dependencies: - "@ts-morph/common" "~0.22.0" - code-block-writer "^12.0.0" + "@ts-morph/common" "~0.23.0" + code-block-writer "^13.0.1" ts-node@^10.1.0, ts-node@^10.7.0: version "10.9.2" diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs index c74e4a4f765e..7ba7d3cb40cf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs @@ -5,12 +5,13 @@ use xshell::Shell; use super::releases::{get_releases_with_arch, Arch, Version}; use crate::messages::{ - 
MSG_ARCH_NOT_SUPPORTED_ERR, MSG_ERA_VM_SOLC_VERSION_PROMPT,
+    MSG_FETCHING_VYPER_RELEASES_SPINNER, MSG_FETCHING_ZKSOLC_RELEASES_SPINNER,
+    MSG_FETCHING_ZKVYPER_RELEASES_SPINNER, MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER,
+    MSG_FETCH_SOLC_RELEASES_SPINNER, MSG_GET_ERA_VM_SOLC_RELEASES_ERR, MSG_GET_SOLC_RELEASES_ERR,
+    MSG_GET_VYPER_RELEASES_ERR, MSG_GET_ZKSOLC_RELEASES_ERR, MSG_GET_ZKVYPER_RELEASES_ERR,
+    MSG_NO_VERSION_FOUND_ERR, MSG_OS_NOT_SUPPORTED_ERR, MSG_SOLC_VERSION_PROMPT,
+    MSG_VYPER_VERSION_PROMPT, MSG_ZKSOLC_VERSION_PROMPT, MSG_ZKVYPER_VERSION_PROMPT,
 };
 
 #[derive(Debug, Clone, Parser, Default)]
@@ -24,9 +25,15 @@ pub struct InitContractVerifierArgs {
     /// Version of solc to install
     #[clap(long)]
     pub solc_version: Option<String>,
+    /// Version of era vm solc to install
+    #[clap(long)]
+    pub era_vm_solc_version: Option<String>,
     /// Version of vyper to install
     #[clap(long)]
     pub vyper_version: Option<String>,
+    /// Install only provided compilers
+    #[clap(long, default_missing_value = "true")]
+    pub only: bool,
 }
 
 #[derive(Debug, Clone)]
@@ -34,6 +41,7 @@ pub struct InitContractVerifierArgsFinal {
     pub zksolc_releases: Vec<Version>,
     pub zkvyper_releases: Vec<Version>,
     pub solc_releases: Vec<Version>,
+    pub era_vm_solc_releases: Vec<Version>,
     pub vyper_releases: Vec<Version>,
 }
 
@@ -68,6 +76,14 @@ impl InitContractVerifierArgs {
         )
         .context(MSG_GET_SOLC_RELEASES_ERR)?;
 
+        let era_vm_solc_releases = get_releases_with_arch(
+            shell,
+            "matter-labs/era-solidity",
+            arch,
+            MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER,
+        )
+        .context(MSG_GET_ERA_VM_SOLC_RELEASES_ERR)?;
+
         let vyper_releases = get_releases_with_arch(
             shell,
             "vyperlang/vyper",
@@ -81,33 +97,42 @@ impl InitContractVerifierArgs {
             zksolc_releases.clone(),
             MSG_ZKSOLC_VERSION_PROMPT,
         )?;
-        let zksolc_releases = get_releases_above_version(zksolc_releases, zksolc_version)?;
+        let zksolc_releases = get_final_releases(zksolc_releases, zksolc_version, self.only)?;
 
         let zkvyper_version = select_min_version(
             self.zkvyper_version,
             zkvyper_releases.clone(),
             MSG_ZKVYPER_VERSION_PROMPT,
         )?;
-        let zkvyper_releases = get_releases_above_version(zkvyper_releases, zkvyper_version)?;
+        let zkvyper_releases = get_final_releases(zkvyper_releases, zkvyper_version, self.only)?;
 
         let solc_version = select_min_version(
             self.solc_version,
             solc_releases.clone(),
             MSG_SOLC_VERSION_PROMPT,
         )?;
-        let solc_releases = get_releases_above_version(solc_releases, solc_version)?;
+        let solc_releases = get_final_releases(solc_releases, solc_version, self.only)?;
+
+        let era_vm_solc_version = select_min_version(
+            self.era_vm_solc_version,
+            era_vm_solc_releases.clone(),
+            MSG_ERA_VM_SOLC_VERSION_PROMPT,
+        )?;
+        let era_vm_solc_releases =
+            get_final_releases(era_vm_solc_releases, era_vm_solc_version, self.only)?;
 
         let vyper_version = select_min_version(
             self.vyper_version,
             vyper_releases.clone(),
             MSG_VYPER_VERSION_PROMPT,
         )?;
-        let vyper_releases = get_releases_above_version(vyper_releases, vyper_version)?;
+        let vyper_releases = get_final_releases(vyper_releases, vyper_version, self.only)?;
 
         Ok(InitContractVerifierArgsFinal {
             zksolc_releases,
             zkvyper_releases,
             solc_releases,
+            era_vm_solc_releases,
             vyper_releases,
         })
     }
 
@@ -156,14 +181,20 @@ fn select_min_version(
     Ok(selected)
 }
 
-fn get_releases_above_version(
+fn get_final_releases(
     releases: Vec<Version>,
     version: Version,
+    only: bool,
 ) -> anyhow::Result<Vec<Version>> {
     let pos = releases
         .iter()
         .position(|r| r.version == version.version)
         .context(MSG_NO_VERSION_FOUND_ERR)?;
 
-    Ok(releases[..=pos].to_vec())
+    let result = if only {
+        vec![releases[pos].clone()]
+    } else {
+        releases[..=pos].to_vec()
+    };
+
+    Ok(result)
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs
index 5fd482ae5fff..f376a0d36eca 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs
@@ -36,6 +36,14 @@ pub(crate) async fn run(shell: &Shell, args: InitContractVerifierArgs) -> anyhow
         "solc",
     )?;
 
+    download_binaries(
+        shell,
+        args.era_vm_solc_releases,
+        get_era_vm_solc_path,
+        &link_to_code,
+        "solc",
+    )?;
+
     download_binaries(
         shell,
         args.vyper_releases,
@@ -105,3 +113,9 @@ fn get_vyper_path(link_to_code: &Path, version: &str) -> PathBuf {
 fn get_solc_path(link_to_code: &Path, version: &str) -> PathBuf {
     link_to_code.join("etc/solc-bin/").join(version)
 }
+
+fn get_era_vm_solc_path(link_to_code: &Path, version: &str) -> PathBuf {
+    link_to_code
+        .join("etc/solc-bin/")
+        .join(format!("zkVM-{version}"))
+}
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index b20e8edf8ad9..5f398f4b8a3b 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -414,10 +414,12 @@ pub(super) const MSG_GET_ZKSOLC_RELEASES_ERR: &str = "Failed to get zksolc relea
 pub(super) const MSG_FETCHING_ZKSOLC_RELEASES_SPINNER: &str = "Fetching zksolc releases...";
 pub(super) const MSG_FETCHING_ZKVYPER_RELEASES_SPINNER: &str = "Fetching zkvyper releases...";
 pub(super) const MSG_FETCH_SOLC_RELEASES_SPINNER: &str = "Fetching solc releases...";
+pub(super) const MSG_FETCH_ERA_VM_SOLC_RELEASES_SPINNER: &str = "Fetching era vm solc releases...";
 pub(super) const MSG_FETCHING_VYPER_RELEASES_SPINNER: &str = "Fetching vyper releases...";
 pub(super) const MSG_ZKSOLC_VERSION_PROMPT: &str = "Select the minimal zksolc version:";
 pub(super) const MSG_ZKVYPER_VERSION_PROMPT: &str = "Select the minimal zkvyper version:";
 pub(super) const MSG_SOLC_VERSION_PROMPT: &str = "Select the minimal solc version:";
+pub(super) const MSG_ERA_VM_SOLC_VERSION_PROMPT: &str = "Select the minimal era vm solc version:";
 pub(super) const MSG_VYPER_VERSION_PROMPT: &str = "Select the minimal vyper version:";
 pub(super) const MSG_NO_RELEASES_FOUND_ERR: &str = "No releases found for current architecture";
 pub(super) const MSG_NO_VERSION_FOUND_ERR: &str = "No version found";
@@ -425,6 +427,7 @@ pub(super) const MSG_ARCH_NOT_SUPPORTED_ERR: &str = "Architecture not supported"
 pub(super) const MSG_OS_NOT_SUPPORTED_ERR: &str = "OS not supported";
 pub(super) const MSG_GET_VYPER_RELEASES_ERR: &str = "Failed to get vyper releases";
 pub(super) const MSG_GET_SOLC_RELEASES_ERR: &str = "Failed to get solc releases";
+pub(super) const MSG_GET_ERA_VM_SOLC_RELEASES_ERR: &str = "Failed to get era vm solc releases";
 pub(super) const MSG_GET_ZKVYPER_RELEASES_ERR: &str = "Failed to get zkvyper releases";
 
 pub(super) fn msg_binary_already_exists(name: &str, version: &str) -> String {
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
index 92c8a0f1086e..803e962c0ff8 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
@@ -43,6 +43,7 @@ pub fn containers(shell: &Shell) -> anyhow::Result<()> {
 
 pub fn contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::Result<()> {
     let path_to_foundry = ecosystem_config.path_to_foundry();
+    let contracts_path = ecosystem_config.link_to_code.join("contracts");
     logger::info(MSG_CONTRACTS_CLEANING);
     shell
         .remove_path(path_to_foundry.join("broadcast"))
         .context("broadcast")?;
@@ -62,18 +63,35 @@ pub fn contracts(shell: &Shell, ecosystem_config: &EcosystemConfig) -> anyhow::R
     shell
         .remove_path(path_to_foundry.join("typechain"))
         .context("typechain")?;
+    shell
+        .remove_path(contracts_path.join("l2-contracts/cache-forge"))
+        .context("l2-contracts/cache-forge")?;
+    shell
+        .remove_path(contracts_path.join("l2-contracts/zkout"))
+        .context("l2-contracts/zkout")?;
+    shell
+        .remove_path(contracts_path.join("system-contracts/cache-forge"))
+        .context("system-contracts/cache-forge")?;
+    shell
+        .remove_path(contracts_path.join("system-contracts/zkout"))
+        .context("system-contracts/zkout")?;
+    shell
+        .remove_path(contracts_path.join("system-contracts/contracts-preprocessed"))
+        .context("system-contracts/contracts-preprocessed")?;
     shell
         .remove_path(path_to_foundry.join("script-config"))
         .context("remove script-config")?;
     shell
         .create_dir(path_to_foundry.join("script-config"))
         .context("create script-config")?;
+    shell.write_file(path_to_foundry.join("script-config/.gitkeep"), "")?;
     shell
         .remove_path(path_to_foundry.join("script-out"))
         .context("remove script-out")?;
     shell
         .create_dir(path_to_foundry.join("script-out"))
         .context("create script-out")?;
+    shell.write_file(path_to_foundry.join("script-out/.gitkeep"), "")?;
 
     logger::info(MSG_CONTRACTS_CLEANING_FINISHED);
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs
new file mode 100644
index 000000000000..a08b0404605e
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs
@@ -0,0 +1,34 @@
+use std::path::Path;
+
+use common::{cmd::Cmd, db::wait_for_db, logger};
+use xshell::{cmd, Shell};
+
+use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES};
+
+pub async fn reset_test_databases(
+    shell: &Shell,
+    link_to_code: &Path,
+    dals: Vec<Dal>,
+) -> anyhow::Result<()> {
+    logger::info(MSG_RESETTING_TEST_DATABASES);
+
+    Cmd::new(cmd!(
+        shell,
+        "docker compose -f docker-compose-unit-tests.yml down"
+    ))
+    .run()?;
+    Cmd::new(cmd!(
+        shell,
+        "docker compose -f docker-compose-unit-tests.yml up -d"
+    ))
+    .run()?;
+
+    for dal in dals {
+        let mut url = dal.url.clone();
+        url.set_path("");
+        wait_for_db(&url, 3).await?;
+        database::reset::reset_database(shell, link_to_code, dal.clone()).await?;
+    }
+
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
index fb3e1436acc3..fc3ba974118c 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs
@@ -42,7 +42,7 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> {
     let test_pattern = args.test_pattern;
     let mut command = cmd!(
         shell,
-        "yarn jest --forceExit
--testTimeout 120000 -t {test_pattern...}" + "yarn jest api/contract-verification.test.ts --forceExit --testTimeout 120000 -t {test_pattern...}" ) .env("CHAIN_NAME", ecosystem_config.current_chain()) .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index 712e2f75eefd..7d2af71ae9ce 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -13,6 +13,7 @@ use crate::messages::{ mod args; mod build; +mod db; mod integration; mod l1_contracts; mod loadtest; @@ -57,7 +58,7 @@ pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> { TestCommands::Build => build::run(shell), TestCommands::Rust(args) => rust::run(shell, args).await, TestCommands::L1Contracts => l1_contracts::run(shell), - TestCommands::Prover => prover::run(shell), + TestCommands::Prover => prover::run(shell).await, TestCommands::Wallet => wallet::run(shell), TestCommands::Loadtest => loadtest::run(shell), } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs index 4e9c4fc25283..f48b359a9357 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs @@ -1,15 +1,29 @@ +use std::str::FromStr; + use common::{cmd::Cmd, logger}; use config::EcosystemConfig; +use url::Url; use xshell::{cmd, Shell}; -use crate::messages::MSG_PROVER_TEST_SUCCESS; +use crate::{ + commands::test::db::reset_test_databases, + dals::{Dal, PROVER_DAL_PATH}, + defaults::TEST_DATABASE_PROVER_URL, + messages::MSG_PROVER_TEST_SUCCESS, +}; -pub fn run(shell: &Shell) -> anyhow::Result<()> { +pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; - let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover")); + let dals = vec![Dal { + url: Url::from_str(TEST_DATABASE_PROVER_URL)?, + path: PROVER_DAL_PATH.to_string(), + }]; + reset_test_databases(shell, &ecosystem.link_to_code, dals).await?; + let _dir_guard = shell.push_dir(ecosystem.link_to_code.join("prover")); Cmd::new(cmd!(shell, "cargo test --release --workspace --locked")) .with_force_run() + .env("TEST_DATABASE_PROVER_URL", TEST_DATABASE_PROVER_URL) .run()?; logger::outro(MSG_PROVER_TEST_SUCCESS); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index 3ac331becc9f..fdee03fe63ea 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -1,19 +1,19 @@ -use std::{path::Path, str::FromStr}; +use std::str::FromStr; use anyhow::Context; -use common::{cmd::Cmd, db::wait_for_db, logger}; +use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; use crate::{ - commands::database, + commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, messages::{ - MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_RESETTING_TEST_DATABASES, - MSG_UNIT_TESTS_RUN_SUCCESS, MSG_USING_CARGO_NEXTEST, + MSG_CHAIN_NOT_FOUND_ERR, MSG_POSTGRES_CONFIG_NOT_FOUND_ERR, MSG_UNIT_TESTS_RUN_SUCCESS, + MSG_USING_CARGO_NEXTEST, }, }; @@ -78,31 +78,3 @@ pub 
async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> {
     logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS);
     Ok(())
 }
-
-async fn reset_test_databases(
-    shell: &Shell,
-    link_to_code: &Path,
-    dals: Vec<Dal>,
-) -> anyhow::Result<()> {
-    logger::info(MSG_RESETTING_TEST_DATABASES);
-
-    Cmd::new(cmd!(
-        shell,
-        "docker compose -f docker-compose-unit-tests.yml down"
-    ))
-    .run()?;
-    Cmd::new(cmd!(
-        shell,
-        "docker compose -f docker-compose-unit-tests.yml up -d"
-    ))
-    .run()?;
-
-    for dal in dals {
-        let mut url = dal.url.clone();
-        url.set_path("");
-        wait_for_db(&url, 3).await?;
-        database::reset::reset_database(shell, link_to_code, dal.clone()).await?;
-    }
-
-    Ok(())
-}

From 133d6943da9daa34e1a97895bf5eb9173eeb7074 Mon Sep 17 00:00:00 2001
From: perekopskiy <53865202+perekopskiy@users.noreply.github.com>
Date: Tue, 17 Sep 2024 13:47:02 +0300
Subject: [PATCH 087/116] chore: increase max_nonce_ahead (#2896)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

increases max_nonce_ahead in default config

## Why ❔

fix flaky test

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.
---
 etc/env/file_based/general.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml
index 864bff15dedf..138905883e30 100644
--- a/etc/env/file_based/general.yaml
+++ b/etc/env/file_based/general.yaml
@@ -36,7 +36,7 @@ api:
     filters_limit: 10000
     subscriptions_limit: 10000
     pubsub_polling_interval: 200
-    max_nonce_ahead: 20
+    max_nonce_ahead: 40
     gas_price_scale_factor: 1.5
     estimate_gas_scale_factor: 1.3
     estimate_gas_acceptable_overestimation: 5000

From 80b37b275f7a2e51abbc273f60edde6862354cb8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Mat=C3=ADas=20Ignacio=20Gonz=C3=A1lez?=
Date: Tue, 17 Sep 2024 08:54:28 -0300
Subject: [PATCH 088/116] feat(zk_toolbox): Add zk_supervisor config-writer
 command (#2866)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Add zk_supervisor override configs command

## Why ❔

In some cases we want to override some default configs. Right now it is done manually and we need proper automation for regular use/ci.
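
## Example

A minimal sketch of the intended workflow (the override file name below is hypothetical; the key paths come from the override files added in this PR, and the flags match the CI invocation in this patch). Merge semantics follow the `merge_yaml` helper added in `common/src/yaml.rs`: nested mappings are merged recursively, existing scalar values are overwritten, and an explicit `null` removes the key:

```yaml
# my-overrides.yaml (hypothetical)
db:
  merkle_tree:
    mode: LIGHTWEIGHT
eth:
  sender:
    wait_confirmations: null # an explicit null deletes this key
```

```bash
zk_supervisor config-writer --path my-overrides.yaml --chain legacy
```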
--- .github/workflows/ci-core-reusable.yml | 4 +- .../{mainnet/general.yaml => mainnet.yaml} | 5 +- .../overrides/only_real_proofs.yaml | 3 + .../{testnet/general.yaml => testnet.yaml} | 3 +- .../overrides/tests/loadtest-new.yaml | 7 + .../overrides/tests/loadtest-old.yaml | 7 + etc/env/file_based/overrides/validium.yaml | 6 + zk_toolbox/Cargo.lock | 1 + zk_toolbox/crates/common/src/lib.rs | 1 + zk_toolbox/crates/common/src/yaml.rs | 475 ++++++++++++++++++ zk_toolbox/crates/config/Cargo.toml | 1 + zk_toolbox/crates/config/src/general.rs | 12 +- .../src/commands/chain/genesis.rs | 44 +- .../zk_inception/src/commands/update.rs | 332 +----------- zk_toolbox/crates/zk_inception/src/consts.rs | 4 + .../crates/zk_inception/src/messages.rs | 1 - .../src/commands/config_writer.rs | 35 ++ .../crates/zk_supervisor/src/commands/mod.rs | 1 + zk_toolbox/crates/zk_supervisor/src/main.rs | 16 +- .../crates/zk_supervisor/src/messages.rs | 10 + 20 files changed, 602 insertions(+), 366 deletions(-) rename etc/env/file_based/overrides/{mainnet/general.yaml => mainnet.yaml} (92%) create mode 100644 etc/env/file_based/overrides/only_real_proofs.yaml rename etc/env/file_based/overrides/{testnet/general.yaml => testnet.yaml} (95%) create mode 100644 etc/env/file_based/overrides/tests/loadtest-new.yaml create mode 100644 etc/env/file_based/overrides/tests/loadtest-old.yaml create mode 100644 etc/env/file_based/overrides/validium.yaml create mode 100644 zk_toolbox/crates/common/src/yaml.rs create mode 100644 zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 898085a36784..cac585a00a87 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -126,9 +126,7 @@ jobs: # `sleep 60` because we need to wait until server added all the tokens - name: Run server run: | - ci_run sed -i -e 's/mode: FULL/mode: LIGHTWEIGHT/' chains/legacy/configs/general.yaml - ci_run sed -i -e 's/state_keeper_fast_vm_mode:.*/state_keeper_fast_vm_mode: ${{ matrix.vm_mode }}/' chains/legacy/configs/general.yaml - ci_run sed -i -e 's/delay_interval:.*/delay_interval: 50/' chains/legacy/configs/general.yaml + ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & ci_run sleep 60 diff --git a/etc/env/file_based/overrides/mainnet/general.yaml b/etc/env/file_based/overrides/mainnet.yaml similarity index 92% rename from etc/env/file_based/overrides/mainnet/general.yaml rename to etc/env/file_based/overrides/mainnet.yaml index 7abe8eb54725..0600abf694c2 100644 --- a/etc/env/file_based/overrides/mainnet/general.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -10,12 +10,13 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.06 internal_l1_pricing_multiplier: 1 internal_pubdata_pricing_multiplier: 1.50 poll_period: 60 + watcher: + confirmations_for_eth_event: null observability: log_directives: 
zksync=info,zksync_state_keeper=debug,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=debug,zksync_state=debug,zksync_utils=debug,zksync_eth_sender=debug,loadnext=debug,dev_ticker=info,vm=info,block_sizes_test=info,setup_key_generator_and_server=info,zksync_queued_job_processor=debug,slot_index_consistency_checker=debug,zksync_health_check=debug,zksync_consensus_bft=debug,zksync_consensus_network=debug,zksync_consensus_storage=debug,zksync_consensus_executor=debug, - -# remove eth_sender_wait_confirmations, eth_watcher_confirmations_for_eth_event variables diff --git a/etc/env/file_based/overrides/only_real_proofs.yaml b/etc/env/file_based/overrides/only_real_proofs.yaml new file mode 100644 index 000000000000..527474675116 --- /dev/null +++ b/etc/env/file_based/overrides/only_real_proofs.yaml @@ -0,0 +1,3 @@ +eth: + sender: + proof_sending_mode: ONLY_REAL_PROOFS diff --git a/etc/env/file_based/overrides/testnet/general.yaml b/etc/env/file_based/overrides/testnet.yaml similarity index 95% rename from etc/env/file_based/overrides/testnet/general.yaml rename to etc/env/file_based/overrides/testnet.yaml index 43a62f3f0dd8..e4da1ac96e26 100644 --- a/etc/env/file_based/overrides/testnet/general.yaml +++ b/etc/env/file_based/overrides/testnet.yaml @@ -10,6 +10,7 @@ eth: aggregated_block_prove_deadline: 300 aggregated_block_execute_deadline: 300 timestamp_criteria_max_allowed_lag: 104000 # 29h + wait_confirmations: null gas_adjuster: pricing_formula_parameter_a: 1.1 internal_l1_pricing_multiplier: 1 @@ -18,5 +19,3 @@ eth: confirmations_for_eth_event: 10 observability: log_directives: zksync=info,zksync_state_keeper=debug,zksync_core=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=debug,zksync_state=debug,zksync_utils=debug,zksync_eth_sender=debug,loadnext=debug,dev_ticker=info,vm=info,block_sizes_test=info,setup_key_generator_and_server=info,zksync_queued_job_processor=debug,slot_index_consistency_checker=debug,zksync_health_check=debug,zksync_consensus_bft=debug,zksync_consensus_network=debug,zksync_consensus_storage=debug,zksync_consensus_executor=debug, - -# remove eth_sender_wait_confirmations variable diff --git a/etc/env/file_based/overrides/tests/loadtest-new.yaml b/etc/env/file_based/overrides/tests/loadtest-new.yaml new file mode 100644 index 000000000000..2167f7347e09 --- /dev/null +++ b/etc/env/file_based/overrides/tests/loadtest-new.yaml @@ -0,0 +1,7 @@ +db: + merkle_tree: + mode: LIGHTWEIGHT +experimental_vm: + state_keeper_fast_vm_mode: NEW +mempool: + delay_interval: 50 diff --git a/etc/env/file_based/overrides/tests/loadtest-old.yaml b/etc/env/file_based/overrides/tests/loadtest-old.yaml new file mode 100644 index 000000000000..a2d66d1cf4a7 --- /dev/null +++ b/etc/env/file_based/overrides/tests/loadtest-old.yaml @@ -0,0 +1,7 @@ +db: + merkle_tree: + mode: LIGHTWEIGHT +experimental_vm: + state_keeper_fast_vm_mode: OLD +mempool: + delay_interval: 50 diff --git a/etc/env/file_based/overrides/validium.yaml b/etc/env/file_based/overrides/validium.yaml new file mode 100644 index 000000000000..1af02dd95893 --- /dev/null +++ b/etc/env/file_based/overrides/validium.yaml @@ -0,0 +1,6 @@ +eth: + sender: + pubdata_sending_mode: CUSTOM +state_keeper: + pubdata_overhead_part: 0 + compute_overhead_part: 1 diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index 02da0311991a..fd524865d567 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -725,6 +725,7 @@ dependencies = [ "rand 0.8.5", "serde", "serde_json", + 
"serde_yaml", "strum", "thiserror", "types", diff --git a/zk_toolbox/crates/common/src/lib.rs b/zk_toolbox/crates/common/src/lib.rs index b4495d555ec0..c23ef9202261 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zk_toolbox/crates/common/src/lib.rs @@ -14,6 +14,7 @@ pub mod git; pub mod server; pub mod version; pub mod wallets; +pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zk_toolbox/crates/common/src/yaml.rs new file mode 100644 index 000000000000..83b59ad67642 --- /dev/null +++ b/zk_toolbox/crates/common/src/yaml.rs @@ -0,0 +1,475 @@ +use anyhow::Context; + +use crate::logger; + +pub(super) const MSG_INVALID_KEY_TYPE_ERR: &str = "Invalid key type"; + +/// Holds the differences between two YAML configurations. +#[derive(Default)] +pub struct ConfigDiff { + /// Fields that have different values between the two configurations + /// This contains the new values + pub differing_values: serde_yaml::Mapping, + + /// Fields that are present in the new configuration but not in the old one. + pub new_fields: serde_yaml::Mapping, +} + +impl ConfigDiff { + pub fn print(&self, msg: &str, is_warning: bool) { + if self.new_fields.is_empty() { + return; + } + + if is_warning { + logger::warn(msg); + logger::warn(logger::object_to_string(&self.new_fields)); + } else { + logger::info(msg); + logger::info(logger::object_to_string(&self.new_fields)); + } + } +} + +fn merge_yaml_internal( + a: &mut serde_yaml::Value, + b: serde_yaml::Value, + current_key: String, + diff: &mut ConfigDiff, + override_values: bool, +) -> anyhow::Result<()> { + match (a, b) { + (serde_yaml::Value::Mapping(a), serde_yaml::Value::Mapping(b)) => { + for (key, value) in b { + let k = key.as_str().context(MSG_INVALID_KEY_TYPE_ERR)?.to_string(); + let current_key = if current_key.is_empty() { + k.clone() + } else { + format!("{}.{}", current_key, k) + }; + + if a.contains_key(&key) { + let a_value = a.get_mut(&key).unwrap(); + if value.is_null() && override_values { + a.remove(&key); + diff.differing_values + .insert(current_key.into(), serde_yaml::Value::Null); + } else { + merge_yaml_internal(a_value, value, current_key, diff, override_values)?; + } + } else if !value.is_null() { + a.insert(key.clone(), value.clone()); + diff.new_fields.insert(current_key.into(), value); + } else if override_values { + diff.differing_values + .insert(current_key.into(), serde_yaml::Value::Null); + } + } + } + (a, b) => { + if a != &b { + diff.differing_values.insert(current_key.into(), b.clone()); + if override_values { + *a = b; + } + } + } + } + Ok(()) +} + +pub fn merge_yaml( + a: &mut serde_yaml::Value, + b: serde_yaml::Value, + override_values: bool, +) -> anyhow::Result { + let mut diff = ConfigDiff::default(); + merge_yaml_internal(a, b, "".into(), &mut diff, override_values)?; + Ok(diff) +} + +#[cfg(test)] +mod tests { + #[test] + fn test_merge_yaml_both_are_equal_returns_no_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let diff = super::merge_yaml(&mut a, b, false).unwrap(); + assert!(diff.differing_values.is_empty()); + 
assert!(diff.new_fields.is_empty()); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_b_has_extra_field_returns_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap(); + assert!(diff.differing_values.is_empty()); + assert_eq!(diff.new_fields.len(), 1); + assert_eq!( + diff.new_fields.get::("key5".into()).unwrap(), + b.clone().get("key5").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_a_has_extra_field_no_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b, false).unwrap(); + assert!(diff.differing_values.is_empty()); + assert!(diff.new_fields.is_empty()); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_a_has_extra_field_and_b_has_extra_field_returns_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key6: value6 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + key5: value5 + key6: value6 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap(); + assert_eq!(diff.differing_values.len(), 0); + assert_eq!(diff.new_fields.len(), 1); + assert_eq!( + diff.new_fields.get::("key6".into()).unwrap(), + b.clone().get("key6").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_a_has_different_value_returns_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap(); + assert_eq!(diff.differing_values.len(), 1); + assert_eq!( + diff.differing_values + .get::("key3.key4".into()) + .unwrap(), + b.get("key3").unwrap().get("key4").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_a_has_different_value_and_b_has_extra_field_returns_diff() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + key5: value5 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: 
value4 + key5: value5 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), false).unwrap(); + assert_eq!(diff.differing_values.len(), 1); + assert_eq!( + diff.differing_values + .get::("key3.key4".into()) + .unwrap(), + b.get("key3").unwrap().get("key4").unwrap() + ); + assert_eq!(diff.new_fields.len(), 1); + assert_eq!( + diff.new_fields.get::("key5".into()).unwrap(), + b.get("key5").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_override_values() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap(); + assert_eq!(diff.differing_values.len(), 1); + assert_eq!( + diff.differing_values + .get::("key3.key4".into()) + .unwrap(), + b.get("key3").unwrap().get("key4").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_merge_yaml_override_values_with_extra_field() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + key5: value5 + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value5 + key5: value5 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap(); + assert_eq!(diff.differing_values.len(), 1); + assert_eq!( + diff.differing_values + .get::("key3.key4".into()) + .unwrap(), + b.get("key3").unwrap().get("key4").unwrap() + ); + assert_eq!(diff.new_fields.len(), 1); + assert_eq!( + diff.new_fields.get::("key5".into()).unwrap(), + b.get("key5").unwrap() + ); + assert_eq!(a, expected); + } + + #[test] + fn test_override_values_with_null() { + let mut a = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: + key4: value4 + "#, + ) + .unwrap(); + let b: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + key3: null + "#, + ) + .unwrap(); + + let expected: serde_yaml::Value = serde_yaml::from_str( + r#" + key1: value1 + key2: value2 + "#, + ) + .unwrap(); + + let diff = super::merge_yaml(&mut a, b.clone(), true).unwrap(); + assert_eq!(diff.differing_values.len(), 1); + assert_eq!( + diff.differing_values + .get::("key3".into()) + .unwrap(), + b.get("key3").unwrap() + ); + assert_eq!(a, expected); + } +} diff --git a/zk_toolbox/crates/config/Cargo.toml b/zk_toolbox/crates/config/Cargo.toml index 5f1419c7ce97..9320beffef22 100644 --- a/zk_toolbox/crates/config/Cargo.toml +++ b/zk_toolbox/crates/config/Cargo.toml @@ -18,6 +18,7 @@ ethers.workspace = true rand.workspace = true serde.workspace = true serde_json.workspace = true +serde_yaml.workspace = true strum.workspace = true thiserror.workspace = true types.workspace = true diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 6498beb0f532..87eb3a7eb19b 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -1,6 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::Context; +use common::yaml::merge_yaml; use url::Url; use xshell::Shell; pub use 
zksync_config::configs::GeneralConfig; @@ -10,7 +11,7 @@ use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, - DEFAULT_CONSENSUS_PORT, + ChainConfig, DEFAULT_CONSENSUS_PORT, }; pub struct RocksDbs { @@ -174,6 +175,15 @@ pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> a Ok(()) } +pub fn override_config(shell: &Shell, path: PathBuf, chain: &ChainConfig) -> anyhow::Result<()> { + let chain_config_path = chain.path_to_general_config(); + let override_config = serde_yaml::from_str(&shell.read_file(path)?)?; + let mut chain_config = serde_yaml::from_str(&shell.read_file(chain_config_path.clone())?)?; + merge_yaml(&mut chain_config, override_config, true)?; + shell.write_file(chain_config_path, serde_yaml::to_string(&chain_config)?)?; + Ok(()) +} + fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { let mut http_url_url = Url::parse(http_url)?; if let Err(()) = http_url_url.set_port(Some(port)) { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs index bfa3f94916b8..187af41489d9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs @@ -9,7 +9,7 @@ use common::{ spinner::Spinner, }; use config::{ - set_databases, set_file_artifacts, set_rocks_db_config, + override_config, set_databases, set_file_artifacts, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, @@ -17,12 +17,14 @@ use config::{ use types::ProverMode; use xshell::Shell; use zksync_basic_types::commitment::L1BatchCommitmentMode; -use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; use super::args::genesis::GenesisArgsFinal; use crate::{ commands::chain::args::genesis::GenesisArgs, - consts::{PROVER_MIGRATIONS, SERVER_MIGRATIONS}, + consts::{ + PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, + PROVER_MIGRATIONS, SERVER_MIGRATIONS, + }, messages::{ MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, @@ -55,41 +57,31 @@ pub async fn genesis( ) -> anyhow::Result<()> { shell.create_dir(&config.rocks_db_path)?; + let link_to_code = config.link_to_code.clone(); let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; let mut general = config.get_general_config()?; let file_artifacts = FileArtifacts::new(config.artifacts.clone()); set_rocks_db_config(&mut general, rocks_db)?; set_file_artifacts(&mut general, file_artifacts); + general.save_with_base_path(shell, &config.configs)?; + if config.prover_version != ProverMode::NoProofs { - general - .eth - .as_mut() - .context("eth")? - .sender - .as_mut() - .context("sender")? - .proof_sending_mode = ProofSendingMode::OnlyRealProofs; + override_config( + shell, + link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), + config, + )?; } if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { - general - .eth - .as_mut() - .context("eth")? - .sender - .as_mut() - .context("sender")? 
- .pubdata_sending_mode = PubdataSendingMode::Custom; - general - .state_keeper_config - .as_mut() - .context("state_keeper_config")? - .pubdata_overhead_part = 0.0; + override_config( + shell, + link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), + config, + )?; } - general.save_with_base_path(shell, &config.configs)?; - let mut secrets = config.get_secrets_config()?; set_databases(&mut secrets, &args.server_db, &args.prover_db)?; secrets.save_with_base_path(shell, &config.configs)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zk_toolbox/crates/zk_inception/src/commands/update.rs index a05ecbe62e0f..c140c3a4e9c8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/update.rs @@ -1,7 +1,11 @@ use std::path::Path; use anyhow::{Context, Ok}; -use common::{git, logger, spinner::Spinner}; +use common::{ + git, logger, + spinner::Spinner, + yaml::{merge_yaml, ConfigDiff}, +}; use config::{ ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, ERA_OBSERBAVILITY_DIR, GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, @@ -12,38 +16,11 @@ use super::args::UpdateArgs; use crate::messages::{ msg_diff_contracts_config, msg_diff_genesis_config, msg_diff_secrets, msg_updating_chain, MSG_CHAIN_NOT_FOUND_ERR, MSG_DIFF_EN_CONFIG, MSG_DIFF_EN_GENERAL_CONFIG, - MSG_DIFF_GENERAL_CONFIG, MSG_INVALID_KEY_TYPE_ERR, MSG_PULLING_ZKSYNC_CODE_SPINNER, + MSG_DIFF_GENERAL_CONFIG, MSG_PULLING_ZKSYNC_CODE_SPINNER, MSG_UPDATING_ERA_OBSERVABILITY_SPINNER, MSG_UPDATING_SUBMODULES_SPINNER, MSG_UPDATING_ZKSYNC, MSG_ZKSYNC_UPDATED, }; -/// Holds the differences between two YAML configurations. -#[derive(Default)] -struct ConfigDiff { - /// Fields that have different values between the two configurations - /// This contains the new values - pub differing_values: serde_yaml::Mapping, - - /// Fields that are present in the new configuration but not in the old one. 
-    pub new_fields: serde_yaml::Mapping,
-}
-
-impl ConfigDiff {
-    fn print(&self, msg: &str, is_warning: bool) {
-        if self.new_fields.is_empty() {
-            return;
-        }
-
-        if is_warning {
-            logger::warn(msg);
-            logger::warn(logger::object_to_string(&self.new_fields));
-        } else {
-            logger::info(msg);
-            logger::info(logger::object_to_string(&self.new_fields));
-        }
-    }
-}
-
 pub fn run(shell: &Shell, args: UpdateArgs) -> anyhow::Result<()> {
     logger::info(MSG_UPDATING_ZKSYNC);
     let ecosystem = EcosystemConfig::from_file(shell)?;
@@ -127,7 +104,7 @@ fn update_config(
 ) -> anyhow::Result<()> {
     let original_config = serde_yaml::from_str(&shell.read_file(original_config_path)?)?;
     let mut chain_config = serde_yaml::from_str(&shell.read_file(chain_config_path)?)?;
-    let diff = merge_yaml(&mut chain_config, original_config)?;
+    let diff = merge_yaml(&mut chain_config, original_config, false)?;
     if save_config {
         save_updated_config(&shell, chain_config, chain_config_path, diff, msg)?;
     } else {
@@ -202,298 +179,3 @@ fn update_chain(
 
     Ok(())
 }
-
-fn merge_yaml_internal(
-    a: &mut serde_yaml::Value,
-    b: serde_yaml::Value,
-    current_key: String,
-    diff: &mut ConfigDiff,
-) -> anyhow::Result<()> {
-    match (a, b) {
-        (serde_yaml::Value::Mapping(a), serde_yaml::Value::Mapping(b)) => {
-            for (key, value) in b {
-                let k = key.as_str().context(MSG_INVALID_KEY_TYPE_ERR)?.to_string();
-                let current_key = if current_key.is_empty() {
-                    k.clone()
-                } else {
-                    format!("{}.{}", current_key, k)
-                };
-
-                if a.contains_key(&key) {
-                    merge_yaml_internal(a.get_mut(&key).unwrap(), value, current_key, diff)?;
-                } else {
-                    a.insert(key.clone(), value.clone());
-                    diff.new_fields.insert(current_key.into(), value);
-                }
-            }
-        }
-        (a, b) => {
-            if a != &b {
-                diff.differing_values.insert(current_key.into(), b);
-            }
-        }
-    }
-    Ok(())
-}
-
-fn merge_yaml(a: &mut serde_yaml::Value, b: serde_yaml::Value) -> anyhow::Result<ConfigDiff> {
-    let mut diff = ConfigDiff::default();
-    merge_yaml_internal(a, b, "".into(), &mut diff)?;
-    Ok(diff)
-}
-
-#[cfg(test)]
-mod tests {
-    #[test]
-    fn test_merge_yaml_both_are_equal_returns_no_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let diff = super::merge_yaml(&mut a, b).unwrap();
-        assert!(diff.differing_values.is_empty());
-        assert!(diff.new_fields.is_empty());
-        assert_eq!(a, expected);
-    }
-
-    #[test]
-    fn test_merge_yaml_b_has_extra_field_returns_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-
-        let diff = super::merge_yaml(&mut a, b.clone()).unwrap();
-        assert!(diff.differing_values.is_empty());
-        assert_eq!(diff.new_fields.len(), 1);
-        assert_eq!(
-            diff.new_fields.get::<String>("key5".into()).unwrap(),
-            b.clone().get("key5").unwrap()
-        );
-        assert_eq!(a, expected);
-    }
-
-    #[test]
-    fn test_merge_yaml_a_has_extra_field_no_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-
-        let diff = super::merge_yaml(&mut a, b).unwrap();
-        assert!(diff.differing_values.is_empty());
-        assert!(diff.new_fields.is_empty());
-        assert_eq!(a, expected);
-    }
-
-    #[test]
-    fn test_merge_yaml_a_has_extra_field_and_b_has_extra_field_returns_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key6: value6
-            "#,
-        )
-        .unwrap();
-
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            key6: value6
-            "#,
-        )
-        .unwrap();
-
-        let diff = super::merge_yaml(&mut a, b.clone()).unwrap();
-        assert_eq!(diff.differing_values.len(), 0);
-        assert_eq!(diff.new_fields.len(), 1);
-        assert_eq!(
-            diff.new_fields.get::<String>("key6".into()).unwrap(),
-            b.clone().get("key6").unwrap()
-        );
-        assert_eq!(a, expected);
-    }
-
-    #[test]
-    fn test_merge_yaml_a_has_different_value_returns_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value5
-            "#,
-        )
-        .unwrap();
-
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-
-        let diff = super::merge_yaml(&mut a, b.clone()).unwrap();
-        assert_eq!(diff.differing_values.len(), 1);
-        assert_eq!(
-            diff.differing_values
-                .get::<String>("key3.key4".into())
-                .unwrap(),
-            b.get("key3").unwrap().get("key4").unwrap()
-        );
-        assert_eq!(a, expected);
-    }
-
-    #[test]
-    fn test_merge_yaml_a_has_different_value_and_b_has_extra_field_returns_diff() {
-        let mut a = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            "#,
-        )
-        .unwrap();
-        let b: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value5
-            key5: value5
-            "#,
-        )
-        .unwrap();
-
-        let expected: serde_yaml::Value = serde_yaml::from_str(
-            r#"
-            key1: value1
-            key2: value2
-            key3:
-                key4: value4
-            key5: value5
-            "#,
-        )
-        .unwrap();
-
-        let diff = super::merge_yaml(&mut a, b.clone()).unwrap();
-        assert_eq!(diff.differing_values.len(), 1);
-        assert_eq!(
-            diff.differing_values
-                .get::<String>("key3.key4".into())
-                .unwrap(),
-            b.get("key3").unwrap().get("key4").unwrap()
-        );
-        assert_eq!(diff.new_fields.len(), 1);
-        assert_eq!(
-            diff.new_fields.get::<String>("key5".into()).unwrap(),
-            b.get("key5").unwrap()
-        );
-        assert_eq!(a, expected);
-    }
-}
diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zk_toolbox/crates/zk_inception/src/consts.rs
index 87315dcd8186..9f81847e3336 100644
--- a/zk_toolbox/crates/zk_inception/src/consts.rs
+++ b/zk_toolbox/crates/zk_inception/src/consts.rs
@@ -54,3 +54,7 @@ pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_ge
 pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri";
 pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor";
 pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor";
+
+pub const PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG: &str =
+    "etc/env/file_based/overrides/only_real_proofs.yaml";
+pub const PATH_TO_VALIDIUM_OVERRIDE_CONFIG: &str = "etc/env/file_based/overrides/validium.yaml";
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index 5f398f4b8a3b..3bbac066dbb6 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -454,7 +454,6 @@ pub(super) const MSG_DIFF_EN_CONFIG: &str =
     "Added the following fields to the external node config:";
 pub(super) const MSG_DIFF_EN_GENERAL_CONFIG: &str =
     "Added the following fields to the external node generalconfig:";
-pub(super) const MSG_INVALID_KEY_TYPE_ERR: &str = "Invalid key type";
 pub(super) const MSG_UPDATING_ERA_OBSERVABILITY_SPINNER: &str = "Updating era observability...";
 
 pub(super) fn msg_diff_genesis_config(chain: &str) -> String {
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs
new file mode 100644
index 000000000000..3adecb36d069
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs
@@ -0,0 +1,35 @@
+use anyhow::Context;
+use clap::Parser;
+use common::{config::global_config, logger, Prompt};
+use config::{override_config, EcosystemConfig};
+use xshell::Shell;
+
+use crate::messages::{
+    msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP,
+    MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT,
+};
+
+#[derive(Debug, Parser)]
+pub struct ConfigWriterArgs {
+    #[clap(long, short, help = MSG_OVERRIDE_CONFIG_PATH_HELP)]
+    pub path: Option<String>,
+}
+
+impl ConfigWriterArgs {
+    pub fn get_config_path(self) -> String {
+        self.path
+            .unwrap_or_else(|| Prompt::new(MSG_OVERRRIDE_CONFIG_PATH_PROMPT).ask())
+    }
+}
+
+pub fn run(shell: &Shell, args: ConfigWriterArgs) -> anyhow::Result<()> {
+    let path = args.get_config_path().into();
+    let ecosystem = EcosystemConfig::from_file(shell)?;
+    let chain = ecosystem
+        .load_chain(global_config().chain_name.clone())
+        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
+    logger::step(msg_overriding_config(chain.name.clone()));
+    override_config(shell, path, &chain)?;
+    logger::outro(MSG_OVERRIDE_SUCCESS);
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
index 1f3893e293ef..d3cb99f1e342 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
@@ -1,4 +1,5 @@
 pub mod clean;
+pub mod config_writer;
 pub mod contracts;
 pub mod database;
 pub mod fmt;
diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs
index 3c34d0596569..242affd8a71b 100644
--- a/zk_toolbox/crates/zk_supervisor/src/main.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/main.rs
@@ -1,7 +1,8 @@
 use clap::{Parser, Subcommand};
 use commands::{
-    contracts::ContractsArgs, database::DatabaseCommands, lint::LintArgs, prover::ProverCommands,
-    send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands,
+    config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands,
+    lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs,
+    snapshot::SnapshotCommands, test::TestCommands,
 };
 use common::{
     check_general_prerequisites,
     config::global_config,
     init_prompt_theme, logger,
     version::version_message,
 };
 use
config::EcosystemConfig; use messages::{ - msg_global_chain_does_not_exist, MSG_CONTRACTS_ABOUT, MSG_PROVER_VERSION_ABOUT, - MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, MSG_SUBCOMMAND_DATABASE_ABOUT, - MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, - MSG_SUBCOMMAND_TESTS_ABOUT, + msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, + MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, }; use xshell::Shell; @@ -59,6 +60,8 @@ enum SupervisorSubcommands { Prover(ProverCommands), #[command(about = MSG_CONTRACTS_ABOUT)] Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), #[command(about = MSG_SEND_TXNS_ABOUT)] SendTransactions(SendTransactionsArgs), } @@ -121,6 +124,7 @@ async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, + SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, SupervisorSubcommands::SendTransactions(args) => { commands::send_transactions::run(shell, args).await? } diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs index 72887e40a2ba..5f68630f7562 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs @@ -14,6 +14,7 @@ pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; pub(super) const MSG_SUBCOMMAND_CLEAN: &str = "Clean artifacts"; pub(super) const MSG_SUBCOMMAND_LINT_ABOUT: &str = "Lint code"; pub(super) const MSG_CONTRACTS_ABOUT: &str = "Build contracts"; +pub(super) const MSG_CONFIG_WRITER_ABOUT: &str = "Overwrite general config"; pub(super) const MSG_SUBCOMMAND_FMT_ABOUT: &str = "Format code"; @@ -203,6 +204,15 @@ pub(super) const MSG_WALLETS_TEST_SUCCESS: &str = "Wallets test success"; pub(super) const MSG_LOADTEST_ABOUT: &str = "Run loadtest"; +pub(super) const MSG_OVERRIDE_CONFIG_PATH_HELP: &str = "Path to the config file to override"; +pub(super) const MSG_OVERRRIDE_CONFIG_PATH_PROMPT: &str = + "Provide path to the config file to override"; +pub(super) const MSG_OVERRIDE_SUCCESS: &str = "Config was overridden successfully"; + +pub(super) fn msg_overriding_config(chain: String) -> String { + format!("Overriding general config for chain {}", chain) +} + // Send transactions related messages pub(super) const MSG_SEND_TXNS_ABOUT: &str = "Send transactions from file"; pub(super) const MSG_PROMPT_TRANSACTION_FILE: &str = "Path to transactions file"; From c6e5e1c8b38abc4c21455579576c617ed15521c1 Mon Sep 17 00:00:00 2001 From: perekopskiy <53865202+perekopskiy@users.noreply.github.com> Date: Tue, 17 Sep 2024 16:42:37 +0300 Subject: [PATCH 089/116] chore: update solc in tests (#2897) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Updates solc in tests ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. 
- [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/ci-core-reusable.yml | 2 +- core/tests/ts-integration/hardhat.config.ts | 7 +++++-- .../ts-integration/tests/api/contract-verification.test.ts | 2 +- .../crates/zk_supervisor/src/commands/test/integration.rs | 2 +- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index cac585a00a87..06828de8d4b7 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -327,7 +327,7 @@ jobs: - name: Initialize Contract verifier run: | - ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.18-1.0.1 --only --chain era + ci_run zk_inception contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era ci_run zk_inception contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & - name: Run servers diff --git a/core/tests/ts-integration/hardhat.config.ts b/core/tests/ts-integration/hardhat.config.ts index 42ee6bacf7ad..a96a83ca3ee3 100644 --- a/core/tests/ts-integration/hardhat.config.ts +++ b/core/tests/ts-integration/hardhat.config.ts @@ -20,8 +20,11 @@ export default { } }, solidity: { - version: '0.8.18', - eraVersion: '1.0.1' + version: '0.8.26', + eraVersion: '1.0.1', + settings: { + evmVersion: 'cancun' + } }, vyper: { version: '0.3.10' diff --git a/core/tests/ts-integration/tests/api/contract-verification.test.ts b/core/tests/ts-integration/tests/api/contract-verification.test.ts index 77ab2e6ddfcb..8f8830ce7516 100644 --- a/core/tests/ts-integration/tests/api/contract-verification.test.ts +++ b/core/tests/ts-integration/tests/api/contract-verification.test.ts @@ -12,7 +12,7 @@ const DATE_REGEX = /\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d{6})?/; const ZKSOLC_VERSION = 'v1.5.3'; const SOLC_VERSION = '0.8.26'; -const ZK_VM_SOLC_VERSION = 'zkVM-0.8.18-1.0.1'; +const ZK_VM_SOLC_VERSION = 'zkVM-0.8.26-1.0.1'; const ZKVYPER_VERSION = 'v1.5.4'; const VYPER_VERSION = '0.3.10'; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs index fc3ba974118c..fb3e1436acc3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs @@ -42,7 +42,7 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { let test_pattern = args.test_pattern; let mut command = cmd!( shell, - "yarn jest api/contract-verification.test.ts --forceExit --testTimeout 120000 -t {test_pattern...}" + "yarn jest --forceExit --testTimeout 120000 -t {test_pattern...}" ) .env("CHAIN_NAME", ecosystem_config.current_chain()) .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); From 3f1d1a26076d5bf2fd65c31876d2a32a3cbfff0c Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 18 Sep 2024 01:05:44 +0700 Subject: [PATCH 090/116] feat(ci): Fully use sccache in CI tests (#2903) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Enable full usage of sccache in CI tests ## Why ❔ Speed up tests ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). 
- [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .env | 0 .github/workflows/ci-common-reusable.yml | 5 ++- .github/workflows/ci-core-lint-reusable.yml | 5 ++- .github/workflows/ci-core-reusable.yml | 40 +++++++++++++-------- .github/workflows/ci-docs-reusable.yml | 6 +++- .github/workflows/ci-prover-reusable.yml | 10 ++++-- .github/workflows/vm-perf-comparison.yml | 4 +++ .github/workflows/vm-perf-to-prometheus.yml | 5 ++- 8 files changed, 54 insertions(+), 21 deletions(-) delete mode 100644 .env diff --git a/.env b/.env deleted file mode 100644 index e69de29bb2d1..000000000000 diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index d2f9e11348f0..2e5d36feebff 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -18,13 +18,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index e46a67dd8af4..404f0966b405 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -15,13 +15,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Build run: | diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 06828de8d4b7..53ff64398291 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -26,6 +26,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env # TODO: Remove when we after upgrade of hardhat-plugins - name: pre-download compilers @@ -48,7 +52,6 @@ jobs: - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Init run: | @@ -84,6 +87,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo 
"SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Loadtest configuration run: | @@ -119,7 +126,7 @@ jobs: --set-as-default false \ --ignore-prerequisites \ --legacy-bridge - + ci_run zk_inception ecosystem init --dev --verbose ci_run zk_supervisor contracts --test-contracts @@ -157,12 +164,15 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - ci_run sccache --start-server - name: Build zk_toolbox run: ci_run bash -c "./bin/zkt" @@ -176,7 +186,7 @@ jobs: GENESIS_RECOVERY_LOGS_DIR=logs/genesis_recovery/ EXTERNAL_NODE_LOGS_DIR=logs/external_node REVERT_LOGS_DIR=logs/revert - + mkdir -p $SERVER_LOGS_DIR mkdir -p $INTEGRATION_TESTS_LOGS_DIR mkdir -p $INTEGRATION_TESTS_EN_LOGS_DIR @@ -184,7 +194,7 @@ jobs: mkdir -p $GENESIS_RECOVERY_LOGS_DIR mkdir -p $EXTERNAL_NODE_LOGS_DIR mkdir -p $REVERT_LOGS_DIR - + echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV echo "INTEGRATION_TESTS_LOGS_DIR=$INTEGRATION_TESTS_LOGS_DIR" >> $GITHUB_ENV echo "INTEGRATION_TESTS_EN_LOGS_DIR=$INTEGRATION_TESTS_EN_LOGS_DIR" >> $GITHUB_ENV @@ -352,7 +362,7 @@ jobs: ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log & PID4=$! @@ -365,8 +375,8 @@ jobs: run: | ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era - ci_run zk_inception external-node init --ignore-prerequisites --chain era - + ci_run zk_inception external-node init --ignore-prerequisites --chain era + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium ci_run zk_inception external-node init --ignore-prerequisites --chain validium @@ -374,7 +384,7 @@ jobs: ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token ci_run zk_inception external-node init --ignore-prerequisites --chain custom_token - + ci_run zk_inception external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus ci_run zk_inception external-node init --ignore-prerequisites --chain consensus @@ -384,13 +394,13 @@ jobs: ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain era &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/rollup.log & PID1=$! - + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain validium &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/validium.log & PID2=$! 
- + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain custom_token &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test recovery --snapshot --no-deps --ignore-prerequisites --verbose --chain consensus &> ${{ env.SNAPSHOT_RECOVERY_LOGS_DIR }}/consensus.log & PID4=$! @@ -409,7 +419,7 @@ jobs: ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain custom_token &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/custom_token.log & PID3=$! - + ci_run zk_supervisor test recovery --no-deps --no-kill --ignore-prerequisites --verbose --chain consensus &> ${{ env.GENESIS_RECOVERY_LOGS_DIR }}/consensus.log & PID4=$! @@ -448,7 +458,7 @@ jobs: run: | ci_run killall -INT zksync_server || true ci_run killall -INT zksync_external_node || true - + ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain era &> ${{ env.REVERT_LOGS_DIR }}/rollup.log & PID1=$! @@ -460,7 +470,7 @@ jobs: ci_run zk_supervisor test revert --no-deps --external-node --no-kill --ignore-prerequisites --chain consensus &> ${{ env.REVERT_LOGS_DIR }}/consensus.log & PID4=$! - + wait $PID1 wait $PID2 wait $PID3 diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 2b8eea15a827..5b1d5a9bcdfa 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -17,12 +17,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker compose pull zk docker compose up -d zk - + - name: Build run: | ci_run ./bin/zkt diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index ac971dafac99..367a86c5f40f 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -17,6 +17,10 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV @@ -25,7 +29,6 @@ jobs: run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | @@ -50,13 +53,16 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} 
up --build -d zk postgres - ci_run sccache --start-server - name: Init run: | diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index cd6f490b3368..6e044287ad3d 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -35,6 +35,10 @@ jobs: touch .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env - name: init run: | diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index d201949b3ebe..04eb23f6c346 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -22,7 +22,10 @@ jobs: - name: setup-env run: | echo BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL=${{ secrets.BENCHMARK_PROMETHEUS_PUSHGATEWAY_URL }} >> .env - + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH From dab1eb2316058b9565e05bdd0750a407e36ee342 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 18 Sep 2024 15:03:58 +0700 Subject: [PATCH 091/116] feat(ci): Deprecate use of zk tool in docker-build process (#2901) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Remove use of ZK tool in docker-build process ## Why ❔ Improve readability of CI, speeds up building process ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
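
A note on the sccache plumbing repeated across the Dockerfiles below: each builder stage now declares the cache settings as empty-by-default `ARG`s and re-exports them as `ENV`s, so `cargo` only picks up the GCS-backed `sccache` when CI passes the values in; with the defaults left empty the build should fall back to plain `rustc`. A minimal sketch of driving one of these builds by hand, assuming an environment that can authenticate to the GCS bucket (in CI this comes from the runner's service account); the output tag here is illustrative, not one CI produces:

```bash
# Reproduce the CI build of the server image with the remote Rust cache.
# Omitting the --build-arg flags leaves the args at their "" defaults,
# which disables sccache entirely.
docker build \
  --build-arg SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage \
  --build-arg SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com \
  --build-arg SCCACHE_GCS_RW_MODE=READ_WRITE \
  --build-arg RUSTC_WRAPPER=sccache \
  -f docker/server-v2/Dockerfile \
  -t matterlabs/server-v2:local .
```

The new `new-build-*` workflows inject exactly these four values as `build-args`, so the Dockerfiles themselves stay cache-agnostic.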
--- .github/scripts/rate_limit_check.sh | 43 +++ .github/workflows/ci.yml | 10 +- .../new-build-contract-verifier-template.yml | 271 +++++++++++++++++ .github/workflows/new-build-core-template.yml | 287 ++++++++++++++++++ .../workflows/new-build-prover-template.yml | 198 ++++++++++++ .../new-build-witness-generator-template.yml | 133 ++++++++ docker/contract-verifier/Dockerfile | 15 +- docker/external-node/Dockerfile | 13 +- docker/proof-fri-gpu-compressor/Dockerfile | 20 +- docker/prover-fri-gateway/Dockerfile | 13 +- docker/prover-gpu-fri/Dockerfile | 21 +- docker/prover-job-monitor/Dockerfile | 13 +- docker/server-v2/Dockerfile | 13 +- docker/snapshots-creator/Dockerfile | 13 +- docker/verified-sources-fetcher/Dockerfile | 13 +- docker/witness-generator/Dockerfile | 11 + docker/witness-vector-generator/Dockerfile | 13 +- 17 files changed, 1084 insertions(+), 16 deletions(-) create mode 100755 .github/scripts/rate_limit_check.sh create mode 100644 .github/workflows/new-build-contract-verifier-template.yml create mode 100644 .github/workflows/new-build-core-template.yml create mode 100644 .github/workflows/new-build-prover-template.yml create mode 100644 .github/workflows/new-build-witness-generator-template.yml diff --git a/.github/scripts/rate_limit_check.sh b/.github/scripts/rate_limit_check.sh new file mode 100755 index 000000000000..6594c685d847 --- /dev/null +++ b/.github/scripts/rate_limit_check.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +set -o errexit +set -o pipefail + + +api_endpoint="https://api.github.com/users/zksync-era-bot" +wait_time=60 +max_retries=60 +retry_count=0 + +while [[ $retry_count -lt $max_retries ]]; do + response=$(run_retried curl -s -w "%{http_code}" -o temp.json "$api_endpoint") + http_code=$(echo "$response" | tail -n1) + + if [[ "$http_code" == "200" ]]; then + echo "Request successful. Not rate-limited." + cat temp.json + rm temp.json + exit 0 + elif [[ "$http_code" == "403" ]]; then + rate_limit_exceeded=$(jq -r '.message' temp.json | grep -i "API rate limit exceeded") + if [[ -n "$rate_limit_exceeded" ]]; then + retry_count=$((retry_count+1)) + echo "API rate limit exceeded. Retry $retry_count of $max_retries. Retrying in $wait_time seconds..." + sleep $wait_time + else + echo "Request failed with HTTP status $http_code." + cat temp.json + rm temp.json + exit 1 + fi + else + echo "Request failed with HTTP status $http_code." + cat temp.json + rm temp.json + exit 1 + fi +done + +echo "Reached the maximum number of retries ($max_retries). Exiting." 
+rm temp.json +exit 1 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 291f9237ac52..0a27a719aeb6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: name: Build core images needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-core-template.yml + uses: ./.github/workflows/new-build-core-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -136,7 +136,7 @@ jobs: name: Build contract verifier needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-contract-verifier-template.yml + uses: ./.github/workflows/new-build-contract-verifier-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -148,7 +148,7 @@ jobs: name: Build prover images needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-prover-template.yml + uses: ./.github/workflows/new-build-prover-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -162,12 +162,10 @@ jobs: name: Build prover images with avx512 instructions needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-witness-generator-template.yml + uses: ./.github/workflows/new-build-witness-generator-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - is_pr_from_fork: ${{ github.event.pull_request.head.repo.fork == true }} WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml new file mode 100644 index 000000000000..42791eab6669 --- /dev/null +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -0,0 +1,271 @@ +name: Build contract verifier +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + action: + type: string + default: non-push + required: false + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> 
$GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ ! -z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x 
"./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts-verifier + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} + strategy: + matrix: + components: + - contract-verifier + - verified-sources-fetcher + platforms: + - linux/amd64 + + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contacts-verifier + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - name: contract-verifier + platform: linux/amd64 + - name: verified-sources-fetcher + platform: linux/amd64 + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml new file mode 100644 index 000000000000..fba6a68b8eec --- /dev/null +++ b/.github/workflows/new-build-core-template.yml @@ -0,0 +1,287 @@ +name: Build Core images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + 
compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + en_alpha_release: + description: 'Flag that determins if EN release should be marked as alpha' + type: boolean + required: false + default: false + action: + type: string + required: false + default: "do nothing" + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ ! -z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 
https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.components == 'external-node') && '-alpha' || '' }} + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} + strategy: + matrix: + components: + - server-v2 + - external-node + - snapshots-creator + platforms: + - linux/amd64 + include: + - components: external-node + platforms: linux/arm64 + + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contacts + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - name: server-v2 + platform: linux/amd64 + - name: external-node + platform: linux/amd64,linux/arm64 + - name: snapshots-creator + platform: linux/amd64 + + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.component.name == 'external-node') && '-alpha' || '' }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + shell: bash + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml new file mode 100644 index 000000000000..60c152213e60 --- /dev/null +++ b/.github/workflows/new-build-prover-template.yml @@ -0,0 +1,198 @@ +name: Build Prover images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + 
description: "DOCKERHUB_TOKEN" + required: true + inputs: + ERA_BELLMAN_CUDA_RELEASE: + description: "ERA_BELLMAN_CUDA_RELEASE" + type: string + required: true + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + description: "Action with docker image" + type: string + default: "push" + required: false + is_pr_from_fork: + description: "Indicates whether the workflow is invoked from a PR created from fork" + type: boolean + default: false + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + outputs: + protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. 
+ shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-high-performance ] + strategy: + matrix: + components: + - witness-generator + - prover-gpu-fri + - witness-vector-generator + - prover-fri-gateway + - prover-job-monitor + - proof-fri-gpu-compressor + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: download CRS for GPU compressor + if: matrix.components == 'proof-fri-gpu-compressor' + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key + + # We need to run this only when ERA_BELLMAN_CUDA_RELEASE is not available + # In our case it happens only when PR is created from fork + - name: Wait for runner IP to be not rate-limited against GH API + if: ( inputs.is_pr_from_fork == true && matrix.components == 'proof-fri-gpu-compressor' ) + run: ./.github/scripts/rate_limit_check.sh + + - name: Hack to set env vars inside docker container + shell: bash + run: | + sed -i '/^FROM matterlabs\/zksync-build-base:latest as builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + #TODO: remove AS version =) + sed -i '/^FROM matterlabs\/zksync-build-base:latest AS builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + cat ./docker/${{ matrix.components }}/Dockerfile + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + + copy-images: + name: Copy images between docker registries + needs: [ build-images, get-protocol-version ] + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: matterlabs-ci-runner + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - witness-vector-generator + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Login and push to Asia GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + + - name: Login and push to Europe GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + docker buildx imagetools create \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/new-build-witness-generator-template.yml b/.github/workflows/new-build-witness-generator-template.yml new file mode 100644 index 000000000000..2f1fc0b2dd86 --- /dev/null +++ b/.github/workflows/new-build-witness-generator-template.yml @@ -0,0 +1,133 @@ +name: Build witness generator image with custom compiler flags +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + type: string + default: non-push + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + WITNESS_GENERATOR_RUST_FLAGS: + description: "Rust flags for witness_generator compilation" + type: string + default: "" + required: false + outputs: + 
protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-c3d ] + strategy: + matrix: + components: + - witness-generator + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed1906b8574..bc9a07c7d375 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,5 +1,18 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder + +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 7d276941dc42..a12bd71bca39 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,6 +1,17 @@ # Will work locally only after prior contracts build -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 45f2ffa51b04..e744787c8259 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -1,10 +1,20 @@ # Will work locally only after prior universal setup key download -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ @@ -22,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index de59451fee8f..2ad8d346956c 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,7 +1,18 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index ad3ff1ff7197..2a680a49c5de 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,10 +1,21 @@ -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -21,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index 25d5dcd3af95..b15379d06621 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -1,7 +1,18 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 07611a1d7b4d..e7b036274bda 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,6 +1,17 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 10eef06dfbbc..ee31c5c42d48 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,5 +1,16 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index 972f85d0faf5..faf36f27f5b0 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -1,7 +1,18 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 2eebe07515e4..5c5b2429aa80 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -4,6 +4,17 @@ ARG DEBIAN_FRONTEND=noninteractive ARG RUST_FLAGS="" ENV RUSTFLAGS=${RUST_FLAGS} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index e315f670101a..cfcc8be7efaf 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,7 +1,18 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM matterlabs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . From 20442f65a1e23ac92298e6235ccc3e4c987abc00 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Wed, 18 Sep 2024 11:39:13 +0300 Subject: [PATCH 092/116] test: Investigate L1 "nonce too low" errors (#2848) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Resolves "nonce too low" errors on L1 by retrying transaction requests. This is a temporary measure until a new version of `reth` is released. - Reduces spammy logs for integration tests. Enables verbose output for integration tests in CI. - Fixes some other issues with tests (e.g., a data race in the snapshot recovery test). ## Why ❔ Makes CI more stable. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- .../external_node/src/config/observability.rs | 5 +- core/bin/external_node/src/config/tests.rs | 5 +- core/bin/external_node/src/tests/mod.rs | 8 +- core/lib/vlog/src/lib.rs | 18 ++-- core/tests/recovery-test/src/index.ts | 14 ++-- core/tests/ts-integration/jest.config.json | 1 + core/tests/ts-integration/package.json | 1 + .../ts-integration/src/jest-setup/env.ts | 14 ++++ .../src/jest-setup/global-setup.ts | 3 +- core/tests/ts-integration/src/l1-provider.ts | 82 +++++++++++++++++++ .../src/matchers/transaction.ts | 5 +- core/tests/ts-integration/src/reporter.ts | 2 +- .../ts-integration/src/retry-provider.ts | 61 ++++++++++++-- core/tests/ts-integration/src/test-master.ts | 13 +-- .../src/transaction-response.ts | 9 ++ core/tests/ts-integration/typings/jest.d.ts | 2 + docker-compose-cpu-runner.yml | 2 +- docker-compose-gpu-runner-cuda-12-0.yml | 2 +- docker-compose-gpu-runner.yml | 2 +- docker-compose.yml | 2 +- etc/env/base/rust.toml | 2 +- etc/env/configs/ext-node.toml | 2 +- etc/env/file_based/general.yaml | 2 +- 23 files changed, 215 insertions(+), 42 deletions(-) create mode 100644 core/tests/ts-integration/src/jest-setup/env.ts create mode 100644 core/tests/ts-integration/src/l1-provider.ts create mode 100644 core/tests/ts-integration/src/transaction-response.ts diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs index 0dd83f3bd35b..91b721bf77c9 100644 --- a/core/bin/external_node/src/config/observability.rs +++ b/core/bin/external_node/src/config/observability.rs @@ -95,11 +95,10 @@ impl ObservabilityENConfig { ) }) .transpose()?; - let guard = zksync_vlog::ObservabilityBuilder::new() + zksync_vlog::ObservabilityBuilder::new() .with_logs(Some(logs)) .with_sentry(sentry) - .build(); - Ok(guard) + .try_build() } pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result { 
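For reference, here is how the new fallible initializer is intended to be used in tests. This is a minimal sketch, assuming only the `zksync_vlog` builder API shown in the diff above; the test name and the `tokio::test` attribute are illustrative, mirroring the external-node tests below.

use zksync_vlog::ObservabilityBuilder;

#[tokio::test]
async fn some_integration_test() {
    // Enable logging to simplify debugging. A global tracer may already be
    // installed by another test in the same process, so the error from
    // `try_build()` is deliberately discarded instead of panicking.
    let _guard = ObservabilityBuilder::new().try_build().ok();

    // ... test body ...
}

The infallible `build()` wrapper stays available for binaries, where failing to initialize observability is a genuine bug.
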
diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index 43210a765723..a32be3eff725 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -63,7 +63,10 @@ fn parsing_observability_config() { fn using_unset_sentry_url() { let env_vars = MockEnvironment::new(&[("MISC_SENTRY_URL", "unset")]); let config = ObservabilityENConfig::new(&env_vars).unwrap(); - config.build_observability().unwrap(); + if let Err(err) = config.build_observability() { + // Global tracer may be installed by another test, but the logic shouldn't fail before that. + assert!(format!("{err:?}").contains("global tracer"), "{err:?}"); + } } #[test] diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index 5e9e7b3eeb38..b21dbd0db9a3 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -21,7 +21,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; @@ -92,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { #[tokio::test] async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await; let l2_client = utils::mock_l2_client_hanging(); @@ -128,7 +128,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { #[tokio::test] async fn running_tree_without_core_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await; let l2_client = utils::mock_l2_client(&env); @@ -165,7 +165,7 @@ async fn running_tree_without_core_is_not_allowed() { #[tokio::test] async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; let l2_client = utils::mock_l2_client(&env); diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 268fbd0b39eb..598d17879b84 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -4,6 +4,7 @@ use std::time::Duration; use ::sentry::ClientInitGuard; +use anyhow::Context as _; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub use crate::{logs::Logs, opentelemetry::OpenTelemetry, sentry::Sentry}; @@ -126,8 +127,9 @@ impl ObservabilityBuilder { self } - /// Initializes the observability subsystem. 
- pub fn build(self) -> ObservabilityGuard { + /// Tries to initialize the observability subsystem. Returns an error if it's already initialized. + /// This is mostly useful in tests. + pub fn try_build(self) -> anyhow::Result { let logs = self.logs.unwrap_or_default(); logs.install_panic_hook(); @@ -151,14 +153,20 @@ impl ObservabilityBuilder { .with(logs_layer) .with(otlp_tracing_layer) .with(otlp_logging_layer) - .init(); + .try_init() + .context("failed installing global tracer / logger")?; let sentry_guard = self.sentry.map(|sentry| sentry.install()); - ObservabilityGuard { + Ok(ObservabilityGuard { otlp_tracing_provider, otlp_logging_provider, sentry_guard, - } + }) + } + + /// Initializes the observability subsystem. + pub fn build(self) -> ObservabilityGuard { + self.try_build().unwrap() } } diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 6599e7c5d298..462404af6065 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -271,7 +271,7 @@ export class FundedWallet { await depositTx.waitFinalize(); } - /** Generates at least one L1 batch by transfering funds to itself. */ + /** Generates at least one L1 batch by transferring funds to itself. */ async generateL1Batch(): Promise { const transactionResponse = await this.wallet.transfer({ to: this.wallet.address, @@ -279,15 +279,15 @@ export class FundedWallet { token: zksync.utils.ETH_ADDRESS }); console.log('Generated a transaction from funded wallet', transactionResponse); - const receipt = await transactionResponse.wait(); - console.log('Got finalized transaction receipt', receipt); - // Wait until an L1 batch with the transaction is sealed. - const pastL1BatchNumber = await this.wallet.provider.getL1BatchNumber(); - let newL1BatchNumber: number; - while ((newL1BatchNumber = await this.wallet.provider.getL1BatchNumber()) <= pastL1BatchNumber) { + let receipt: zksync.types.TransactionReceipt; + while (!(receipt = await transactionResponse.wait()).l1BatchNumber) { + console.log('Transaction is not included in L1 batch; sleeping'); await sleep(1000); } + + console.log('Got finalized transaction receipt', receipt); + const newL1BatchNumber = receipt.l1BatchNumber; console.log(`Sealed L1 batch #${newL1BatchNumber}`); return newL1BatchNumber; } diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json index 8fa5ea1eb721..1756de1bb02d 100644 --- a/core/tests/ts-integration/jest.config.json +++ b/core/tests/ts-integration/jest.config.json @@ -14,6 +14,7 @@ "testTimeout": 605000, "globalSetup": "/src/jest-setup/global-setup.ts", "globalTeardown": "/src/jest-setup/global-teardown.ts", + "testEnvironment": "/src/jest-setup/env.ts", "setupFilesAfterEnv": [ "/src/jest-setup/add-matchers.ts" ], diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 9d53420edaad..8e5c0cf7470e 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -25,6 +25,7 @@ "ethers": "^6.7.1", "hardhat": "=2.22.2", "jest": "^29.0.3", + "jest-environment-node": "^29.0.3", "jest-matcher-utils": "^29.0.3", "node-fetch": "^2.6.1", "ts-jest": "^29.0.1", diff --git a/core/tests/ts-integration/src/jest-setup/env.ts b/core/tests/ts-integration/src/jest-setup/env.ts new file mode 100644 index 000000000000..77bbfc929111 --- /dev/null +++ b/core/tests/ts-integration/src/jest-setup/env.ts @@ -0,0 +1,14 @@ +import NodeEnvironment from 'jest-environment-node'; +import type 
{ EnvironmentContext, JestEnvironmentConfig } from '@jest/environment'; + +export default class IntegrationTestEnvironment extends NodeEnvironment { + constructor(config: JestEnvironmentConfig, context: EnvironmentContext) { + super(config, context); + } + + override async setup() { + await super.setup(); + // Provide access to raw console in order to produce less cluttered debug messages + this.global.rawWriteToConsole = console.log; + } +} diff --git a/core/tests/ts-integration/src/jest-setup/global-setup.ts b/core/tests/ts-integration/src/jest-setup/global-setup.ts index d84d70fe69da..ffb1a8c35030 100644 --- a/core/tests/ts-integration/src/jest-setup/global-setup.ts +++ b/core/tests/ts-integration/src/jest-setup/global-setup.ts @@ -11,11 +11,12 @@ declare global { */ async function performSetup(_globalConfig: any, _projectConfig: any) { // Perform the test initialization. - // This is an expensive operation that preceeds running any tests, as we need + // This is an expensive operation that precedes running any tests, as we need // to deposit & distribute funds, deploy some contracts, and perform basic server checks. // Jest writes an initial message without a newline, so we have to do it manually. console.log(''); + globalThis.rawWriteToConsole = console.log; // Before starting any actual logic, we need to ensure that the server is running (it may not // be the case, for example, right after deployment on stage). diff --git a/core/tests/ts-integration/src/l1-provider.ts b/core/tests/ts-integration/src/l1-provider.ts new file mode 100644 index 000000000000..39b0397cd069 --- /dev/null +++ b/core/tests/ts-integration/src/l1-provider.ts @@ -0,0 +1,82 @@ +import { + ethers, + JsonRpcProvider, + Network, + TransactionRequest, + TransactionResponse, + TransactionResponseParams +} from 'ethers'; +import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; + +export class L1Provider extends JsonRpcProvider { + readonly reporter: Reporter; + + constructor(url: string, reporter?: Reporter) { + super(url, undefined, { batchMaxCount: 1 }); + this.reporter = reporter ?? 
new Reporter(); + } + + override _wrapTransactionResponse(tx: TransactionResponseParams, network: Network): L1TransactionResponse { + const base = super._wrapTransactionResponse(tx, network); + return new L1TransactionResponse(base, this.reporter); + } +} + +class L1TransactionResponse extends ethers.TransactionResponse implements AugmentedTransactionResponse { + public readonly kind = 'L1'; + private isWaitingReported: boolean = false; + private isReceiptReported: boolean = false; + + constructor(base: ethers.TransactionResponse, public readonly reporter: Reporter) { + super(base, base.provider); + } + + override async wait(confirmations?: number, timeout?: number) { + if (!this.isWaitingReported) { + this.reporter.debug( + `Started waiting for L1 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})` + ); + this.isWaitingReported = true; + } + + const receipt = await super.wait(confirmations, timeout); + if (receipt !== null && !this.isReceiptReported) { + this.reporter.debug( + `Obtained receipt for L1 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + this.isReceiptReported = true; + } + return receipt; + } + + override replaceableTransaction(startBlock: number): L1TransactionResponse { + const base = super.replaceableTransaction(startBlock); + return new L1TransactionResponse(base, this.reporter); + } +} + +/** Wallet that retries `sendTransaction` requests on "nonce expired" errors, provided that it's possible (i.e., no nonce is set in the request). */ +export class RetryableL1Wallet extends ethers.Wallet { + constructor(key: string, provider: L1Provider) { + super(key, provider); + } + + override async sendTransaction(tx: TransactionRequest): Promise { + const reporter = (this.provider!).reporter; + while (true) { + try { + return await super.sendTransaction(tx); + } catch (err: any) { + // For unknown reason, `reth` sometimes returns outdated transaction count under load, leading to transactions getting rejected. + // This is a workaround for this issue. + reporter.debug('L1 transaction request failed', tx, err); + if (err.code === 'NONCE_EXPIRED' && (tx.nonce === null || tx.nonce === undefined)) { + reporter.debug('Retrying L1 transaction request', tx); + } else { + throw err; + } + } + } + } +} diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts index 89e90b6d5f16..ac5bf8e77eaf 100644 --- a/core/tests/ts-integration/src/matchers/transaction.ts +++ b/core/tests/ts-integration/src/matchers/transaction.ts @@ -1,7 +1,8 @@ import { TestMessage } from './matcher-helpers'; import { MatcherModifier } from '../modifiers'; import * as zksync from 'zksync-ethers'; -import { AugmentedTransactionResponse } from '../retry-provider'; +import { AugmentedTransactionResponse } from '../transaction-response'; +import { ethers } from 'ethers'; // This file contains implementation of matchers for ZKsync/ethereum transaction. // For actual doc-comments, see `typings/jest.d.ts` file. @@ -207,7 +208,7 @@ function fail(message: string) { * * @returns If check has failed, returns a Jest error object. Otherwise, returns `undefined`. 
*/ -function checkReceiptFields(request: zksync.types.TransactionResponse, receipt: zksync.types.TransactionReceipt) { +function checkReceiptFields(request: ethers.TransactionResponseParams, receipt: zksync.types.TransactionReceipt) { const errorMessageBuilder = new TestMessage() .matcherHint('.checkReceiptFields') .line('Transaction receipt is not properly formatted. Transaction request:') diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 903ff3101ef9..e6a11f0725bf 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -102,7 +102,7 @@ export class Reporter { // Timestamps only make sense to include in tests. const timestampString = testName === undefined ? '' : timestamp(`${new Date().toISOString()} `); const testString = testName ? info(` [${testName}]`) : ''; - console.log(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); + rawWriteToConsole(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); } } diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 1763c0e4edf5..51d88357c6c3 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,12 +1,15 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; +import { L1Provider, RetryableL1Wallet } from './l1-provider'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. */ export class RetryProvider extends zksync.Provider { private readonly reporter: Reporter; + private readonly knownTransactionHashes: Set = new Set(); constructor(_url?: string | { url: string; timeout: number }, network?: ethers.Networkish, reporter?: Reporter) { let url; @@ -55,15 +58,63 @@ export class RetryProvider extends zksync.Provider { } } + override _wrapTransactionResponse(txResponse: any): L2TransactionResponse { + const base = super._wrapTransactionResponse(txResponse); + this.knownTransactionHashes.add(base.hash); + return new L2TransactionResponse(base, this.reporter); + } + override _wrapTransactionReceipt(receipt: any): zksync.types.TransactionReceipt { const wrapped = super._wrapTransactionReceipt(receipt); - this.reporter.debug( - `Obtained receipt for transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` - ); + if (!this.knownTransactionHashes.has(receipt.transactionHash)) { + this.knownTransactionHashes.add(receipt.transactionHash); + this.reporter.debug( + `Obtained receipt for L2 transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + } return wrapped; } } -export interface AugmentedTransactionResponse extends zksync.types.TransactionResponse { - readonly reporter?: Reporter; +class L2TransactionResponse extends zksync.types.TransactionResponse implements AugmentedTransactionResponse { + public readonly kind = 'L2'; + private isWaitingReported: boolean = false; + private isReceiptReported: boolean = false; + + constructor(base: zksync.types.TransactionResponse, public readonly reporter: Reporter) { + super(base, base.provider); + } + + override async wait(confirmations?: number) { + if (!this.isWaitingReported) { + this.reporter.debug( + `Started waiting for L2 transaction ${this.hash} 
(from=${this.from}, nonce=${this.nonce})` + ); + this.isWaitingReported = true; + } + const receipt = await super.wait(confirmations); + if (receipt !== null && !this.isReceiptReported) { + this.reporter.debug( + `Obtained receipt for L2 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}` + ); + this.isReceiptReported = true; + } + return receipt; + } + + override replaceableTransaction(startBlock: number): L2TransactionResponse { + const base = super.replaceableTransaction(startBlock); + return new L2TransactionResponse(base, this.reporter); + } +} + +/** Wallet that retries expired nonce errors for L1 transactions. */ +export class RetryableWallet extends zksync.Wallet { + constructor(privateKey: string, l2Provider: RetryProvider, l1Provider: L1Provider) { + super(privateKey, l2Provider, l1Provider); + } + + override ethWallet(): RetryableL1Wallet { + return new RetryableL1Wallet(this.privateKey, this._providerL1()); + } } diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts index 09fddd1589ca..297116b0b512 100644 --- a/core/tests/ts-integration/src/test-master.ts +++ b/core/tests/ts-integration/src/test-master.ts @@ -2,9 +2,10 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { TestEnvironment, TestContext } from './types'; import { claimEtherBack } from './context-owner'; -import { RetryProvider } from './retry-provider'; +import { RetryableWallet, RetryProvider } from './retry-provider'; import { Reporter } from './reporter'; import { bigIntReviver } from './helpers'; +import { L1Provider } from './l1-provider'; /** * Test master is a singleton class (per suite) that is capable of providing wallets to the suite. @@ -19,8 +20,8 @@ export class TestMaster { private readonly env: TestEnvironment; readonly reporter: Reporter; - private readonly l1Provider: ethers.JsonRpcProvider; - private readonly l2Provider: zksync.Provider; + private readonly l1Provider: L1Provider; + private readonly l2Provider: RetryProvider; private readonly mainWallet: zksync.Wallet; private readonly subAccounts: zksync.Wallet[] = []; @@ -52,7 +53,7 @@ export class TestMaster { if (!suiteWalletPK) { throw new Error(`Wallet for ${suiteName} suite was not provided`); } - this.l1Provider = new ethers.JsonRpcProvider(this.env.l1NodeUrl); + this.l1Provider = new L1Provider(this.env.l1NodeUrl, this.reporter); this.l2Provider = new RetryProvider( { url: this.env.l2NodeUrl, @@ -71,7 +72,7 @@ export class TestMaster { this.l2Provider.pollingInterval = 5000; } - this.mainWallet = new zksync.Wallet(suiteWalletPK, this.l2Provider, this.l1Provider); + this.mainWallet = new RetryableWallet(suiteWalletPK, this.l2Provider, this.l1Provider); } /** @@ -112,7 +113,7 @@ export class TestMaster { */ newEmptyAccount(): zksync.Wallet { const randomPK = ethers.Wallet.createRandom().privateKey; - const newWallet = new zksync.Wallet(randomPK, this.l2Provider, this.l1Provider); + const newWallet = new RetryableWallet(randomPK, this.l2Provider, this.l1Provider); this.subAccounts.push(newWallet); return newWallet; } diff --git a/core/tests/ts-integration/src/transaction-response.ts b/core/tests/ts-integration/src/transaction-response.ts new file mode 100644 index 000000000000..a104b0107edd --- /dev/null +++ b/core/tests/ts-integration/src/transaction-response.ts @@ -0,0 +1,9 @@ +import { ethers } from 'ethers'; +import { Reporter } from './reporter'; + +export interface AugmentedTransactionResponse extends 
ethers.TransactionResponseParams { + readonly kind: 'L1' | 'L2'; + readonly reporter?: Reporter; + + wait(confirmations?: number, timeout?: number): Promise; +} diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts index 4d8f1c3530c5..3bb62732cf70 100644 --- a/core/tests/ts-integration/typings/jest.d.ts +++ b/core/tests/ts-integration/typings/jest.d.ts @@ -1,6 +1,8 @@ import { MatcherModifier } from '../src/matchers/transaction-modifiers'; export declare global { + function rawWriteToConsole(message: string, ...args: any[]); + namespace jest { interface Matchers { // Generic matchers diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml index e0f751130eb0..beb54f3ade98 100644 --- a/docker-compose-cpu-runner.yml +++ b/docker-compose-cpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index f2089446a41d..35a0faeb9620 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index 35c6c3778f22..f95ae0d5f544 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -11,7 +11,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config ports: - 127.0.0.1:8545:8545 diff --git a/docker-compose.yml b/docker-compose.yml index 7e1b52f83347..1e3a273ec9a4 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -13,7 +13,7 @@ services: source: ./etc/reth/chaindata target: /chaindata - command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config + command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config postgres: diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 1bb69374ab1a..d8bef020c642 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -7,7 +7,7 @@ RUST_LOG="""\ zksync_node_framework=info,\ zksync_block_reverter=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_node_db_pruner=info,\ zksync_eth_sender=info,\ zksync_node_fee_model=info,\ diff --git a/etc/env/configs/ext-node.toml 
b/etc/env/configs/ext-node.toml index b2f740065591..a5eb22db5ec1 100644 --- a/etc/env/configs/ext-node.toml +++ b/etc/env/configs/ext-node.toml @@ -63,7 +63,7 @@ zksync_node_consensus=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_core=debug,\ zksync_dal=info,\ zksync_db_connection=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 138905883e30..ca9c3fd0c796 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -311,7 +311,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset From ba21c6eeb042777ca07278d6495cdca92cdc89eb Mon Sep 17 00:00:00 2001 From: Danil Date: Wed, 18 Sep 2024 11:37:22 +0200 Subject: [PATCH 093/116] fix(ci): vm perf to prometheus (#2909) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. Signed-off-by: Danil --- .github/workflows/vm-perf-to-prometheus.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 04eb23f6c346..2c82b796d70e 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -34,7 +34,7 @@ jobs: run_retried docker compose pull zk docker compose up -d zk ci_run zkt - ci_run zks contracts all + ci_run zk_supervisor contracts all - name: run benchmarks run: | From 5cb04edbc13535f6f13972ad00038c6a22f7b7df Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 18 Sep 2024 20:34:37 +0700 Subject: [PATCH 094/116] feat(ci): Add building base docker image (#2913) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Workflow to build base-image ## Why ❔ Added ability to push to ghcr.io ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- .github/workflows/build-base.yml | 159 +++++++++++++++++++++++++++++++ 1 file changed, 159 insertions(+) create mode 100644 .github/workflows/build-base.yml diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml new file mode 100644 index 000000000000..d71b920a9a8a --- /dev/null +++ b/.github/workflows/build-base.yml @@ -0,0 +1,159 @@ +name: Build base Docker image +on: + workflow_dispatch: + inputs: + repo_ref: + description: "git reference of the zksync-era to build" + required: true + default: main +jobs: + build-images: + name: Build and Push Docker Images + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.arch, 'arm')] }} + outputs: + image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }} + # Needed to push to Gihub Package Registry + permissions: + packages: write + contents: read + env: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + REPO_REF: ${{ github.event.inputs.repo_ref }} + strategy: + matrix: + name: [ build-base ] + repository: [ zksync-build-base ] + arch: [ amd64, arm64 ] + + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + with: + submodules: "recursive" + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to DockerHub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Get tag + id: get-sha + run: | + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: . 
+ file: docker/build-base/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + matterlabs/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + ghcr.io/${{ github.repository_owner }}/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + + multiarch_manifest: + # Needed to push to Gihub Package Registry + permissions: + packages: write + contents: read + needs: [ build-images ] + env: + IMAGE_TAG_SUFFIX: ${{ needs.build-images.outputs.image_tag_sha }} + runs-on: [ matterlabs-ci-runner-high-performance ] + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to DockerHub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Create and push multi-arch manifests for Dockerhub + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="matterlabs/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull matterlabs/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("matterlabs/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for GitHub Container Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="ghcr.io/${{ github.repository_owner }}/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for Google Artifact Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done From 87292705073081d9fad706300fa545013aa61ba4 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 18 
Sep 2024 21:00:10 +0700 Subject: [PATCH 095/116] fix(ci): Fix multiarch for build-base CI (#2914) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Fix multiarch command for build-base-image CI ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-base.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml index d71b920a9a8a..d8e557225620 100644 --- a/.github/workflows/build-base.yml +++ b/.github/workflows/build-base.yml @@ -151,7 +151,7 @@ jobs: for arch in "${archs[@]}"; do TAG="$IMAGE_TAG_SUFFIX" - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + docker pull us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch} individual_images+=("us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch}") done From 86299e7fac739aee826bd121ca47633b1da390e8 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Wed, 18 Sep 2024 23:42:47 +0700 Subject: [PATCH 096/116] fix(ci): Switch build-base to ghcr.io (#2912) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Change build-base image location from dockerhub to ghcr.io ## Why ❔ Workaround for dockerhub rate-limiting ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--- docker/contract-verifier/Dockerfile | 2 +- docker/external-node/Dockerfile | 2 +- docker/prover-fri-gateway/Dockerfile | 2 +- docker/prover-job-monitor/Dockerfile | 2 +- docker/server-v2/Dockerfile | 2 +- docker/snapshots-creator/Dockerfile | 2 +- docker/verified-sources-fetcher/Dockerfile | 2 +- docker/witness-generator/Dockerfile | 2 +- docker/witness-vector-generator/Dockerfile | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index bc9a07c7d375..b8d77163f141 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index a12bd71bca39..a573a76511a2 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,6 +1,6 @@ # Will work locally only after prior contracts build -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index 2ad8d346956c..a8d389df2839 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,4 +1,4 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index b15379d06621..b7255f5df433 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -1,4 +1,4 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index e7b036274bda..d5b3ef2a5e61 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,6 +1,6 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index ee31c5c42d48..044599bcc920 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index faf36f27f5b0..f7a08eba587d 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -1,4 +1,4 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 5c5b2429aa80..e8f017c4971d 100644 --- a/docker/witness-generator/Dockerfile +++ 
b/docker/witness-generator/Dockerfile @@ -1,4 +1,4 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive ARG RUST_FLAGS="" diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index cfcc8be7efaf..dd04de7b7852 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,4 +1,4 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive From 8363c1d8697ad9bd2fe5d326218476bc3dad38af Mon Sep 17 00:00:00 2001 From: Nacho Avecilla Date: Thu, 19 Sep 2024 04:26:56 -0300 Subject: [PATCH 097/116] feat: make `to` address optional for transaction data (#2852) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ This PR is the first part of [this one](https://github.com/matter-labs/zksync-era/pull/2631). We decided to separate it into smaller PRs to make it easier to read and review. This PR contains only the changes to the `TransactionData` structs for the different VM versions, modifying them for an optional field. The other parts are currently available in our repo fork, as they are based on this branch. The other PRs are: - [feat: evm simulator as base system contract](https://github.com/lambdaclass/zksync-era/pull/256): integrates the EVM interpreter contract as a base system contract and includes all necessary updates for zksync-era to run with it. - [feat: make evm simulator optional for the server](https://github.com/lambdaclass/zksync-era/pull/257): makes the evm simulator optional to run using a config file. ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
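To make the shape of the change concrete, here is a minimal sketch of the new `Execute` payload. Field names and the `Address::repeat_byte` / `U256::zero` helpers are taken from the diffs below; the `zksync_types` import path and the concrete values are assumptions for illustration. A regular call still carries a concrete `to` address, while a deployment-style payload can leave it unset.

use zksync_types::{Address, Execute, U256};

fn main() {
    // A regular call: `to` still points at a concrete contract.
    let _call = Execute {
        contract_address: Some(Address::repeat_byte(0x11)),
        calldata: vec![1, 2, 3],
        value: U256::zero(),
        factory_deps: vec![],
    };

    // A deployment-style payload: `to` can now be left unset.
    let _deploy = Execute {
        contract_address: None,
        calldata: vec![],
        value: U256::zero(),
        factory_deps: vec![],
    };
}
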
--- .../system-constants-generator/src/utils.rs | 4 +- core/lib/dal/src/consensus/mod.rs | 9 +++-- core/lib/dal/src/consensus/proto/mod.proto | 2 +- core/lib/dal/src/consensus/tests.rs | 2 +- core/lib/dal/src/models/tests.rs | 2 +- core/lib/dal/src/tests/mod.rs | 6 +-- core/lib/dal/src/transactions_dal.rs | 39 ++++++++++++------- core/lib/mempool/src/tests.rs | 4 +- .../src/versions/vm_1_3_2/test_utils.rs | 2 +- .../src/versions/vm_1_3_2/transaction_data.rs | 6 +-- .../src/versions/vm_1_4_1/tests/gas_limit.rs | 6 +-- .../types/internals/transaction_data.rs | 6 +-- .../src/versions/vm_1_4_2/tests/gas_limit.rs | 6 +-- .../types/internals/transaction_data.rs | 6 +-- .../vm_boojum_integration/tests/gas_limit.rs | 6 +-- .../types/internals/transaction_data.rs | 6 +-- .../src/versions/vm_fast/tests/block_tip.rs | 2 +- .../src/versions/vm_fast/tests/circuits.rs | 2 +- .../src/versions/vm_fast/tests/code_oracle.rs | 8 ++-- .../src/versions/vm_fast/tests/gas_limit.rs | 5 ++- .../vm_fast/tests/get_used_contracts.rs | 6 +-- .../versions/vm_fast/tests/l1_tx_execution.rs | 2 +- .../src/versions/vm_fast/tests/l2_blocks.rs | 2 +- .../versions/vm_fast/tests/nonce_holder.rs | 2 +- .../src/versions/vm_fast/tests/precompiles.rs | 6 +-- .../src/versions/vm_fast/tests/refunds.rs | 4 +- .../versions/vm_fast/tests/require_eip712.rs | 4 +- .../src/versions/vm_fast/tests/rollbacks.rs | 4 +- .../src/versions/vm_fast/tests/sekp256r1.rs | 2 +- .../src/versions/vm_fast/tests/storage.rs | 4 +- .../vm_fast/tests/tracing_execution_error.rs | 2 +- .../src/versions/vm_fast/tests/transfer.rs | 6 +-- .../src/versions/vm_fast/tests/upgrade.rs | 4 +- .../src/versions/vm_fast/transaction_data.rs | 6 +-- .../src/versions/vm_latest/tests/block_tip.rs | 2 +- .../versions/vm_latest/tests/call_tracer.rs | 4 +- .../src/versions/vm_latest/tests/circuits.rs | 2 +- .../versions/vm_latest/tests/code_oracle.rs | 8 ++-- .../src/versions/vm_latest/tests/gas_limit.rs | 5 ++- .../vm_latest/tests/get_used_contracts.rs | 4 +- .../vm_latest/tests/l1_tx_execution.rs | 2 +- .../versions/vm_latest/tests/nonce_holder.rs | 2 +- .../versions/vm_latest/tests/precompiles.rs | 6 +-- .../vm_latest/tests/prestate_tracer.rs | 4 +- .../src/versions/vm_latest/tests/refunds.rs | 4 +- .../vm_latest/tests/require_eip712.rs | 4 +- .../src/versions/vm_latest/tests/rollbacks.rs | 4 +- .../src/versions/vm_latest/tests/sekp256r1.rs | 2 +- .../src/versions/vm_latest/tests/storage.rs | 2 +- .../tests/tracing_execution_error.rs | 2 +- .../src/versions/vm_latest/tests/transfer.rs | 6 +-- .../src/versions/vm_latest/tests/upgrade.rs | 4 +- .../types/internals/transaction_data.rs | 6 +-- .../multivm/src/versions/vm_m5/test_utils.rs | 2 +- .../src/versions/vm_m5/transaction_data.rs | 6 +-- .../multivm/src/versions/vm_m6/test_utils.rs | 2 +- .../src/versions/vm_m6/transaction_data.rs | 6 +-- .../vm_refunds_enhancement/tests/gas_limit.rs | 6 +-- .../types/internals/transaction_data.rs | 6 +-- .../vm_virtual_blocks/tests/gas_limit.rs | 6 +-- .../types/internals/transaction_data.rs | 6 +-- core/lib/types/src/l1/mod.rs | 6 ++- core/lib/types/src/l2/mod.rs | 10 ++--- core/lib/types/src/lib.rs | 8 ++-- core/lib/types/src/transaction_request.rs | 13 ++++--- core/lib/types/src/tx/execute.rs | 8 ++-- .../api_server/src/execution_sandbox/tests.rs | 2 +- core/node/consensus/src/registry/testonly.rs | 2 +- core/node/consensus/src/vm.rs | 2 +- core/node/eth_watch/src/tests.rs | 4 +- .../state_keeper/src/executor/tests/tester.rs | 4 +- core/node/state_keeper/src/testonly/mod.rs | 4 +- 
core/node/test_utils/src/lib.rs | 2 +- core/node/vm_runner/src/tests/mod.rs | 2 +- .../src/sdk/operations/deploy_contract.rs | 2 +- .../src/sdk/operations/execute_contract.rs | 2 +- .../loadnext/src/sdk/operations/transfer.rs | 4 +- core/tests/loadnext/src/sdk/signer.rs | 6 +-- core/tests/test_account/src/lib.rs | 8 ++-- core/tests/vm-benchmark/src/transaction.rs | 8 ++-- 80 files changed, 204 insertions(+), 189 deletions(-) diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index e596208a7949..3269fc962042 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -89,7 +89,7 @@ pub(super) fn get_l2_tx( pubdata_price: u32, ) -> L2Tx { L2Tx::new_signed( - contract_address, + Some(contract_address), vec![], Nonce(0), Fee { @@ -134,7 +134,7 @@ pub(super) fn get_l1_tx( ) -> L1Tx { L1Tx { execute: Execute { - contract_address, + contract_address: Some(contract_address), calldata: custom_calldata.unwrap_or_default(), value: U256::from(0), factory_deps, diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index f0ef336bc543..f54938e8ec1a 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -401,9 +401,10 @@ impl ProtoRepr for proto::Transaction { } }, execute: Execute { - contract_address: required(&execute.contract_address) - .and_then(|x| parse_h160(x)) - .context("execute.contract_address")?, + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), calldata: required(&execute.calldata).context("calldata")?.clone(), value: required(&execute.value) .and_then(|x| parse_h256(x)) @@ -487,7 +488,7 @@ impl ProtoRepr for proto::Transaction { } }; let execute = proto::Execute { - contract_address: Some(this.execute.contract_address.as_bytes().into()), + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), calldata: Some(this.execute.calldata.clone()), value: Some(u256_to_h256(this.execute.value).as_bytes().into()), factory_deps: this.execute.factory_deps.clone(), diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index da9151f10f4d..3ea49e9c0cd6 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -102,7 +102,7 @@ message ProtocolUpgradeTxCommonData { } message Execute { - optional bytes contract_address = 1; // required; H160 + optional bytes contract_address = 1; // optional; H160 optional bytes calldata = 2; // required optional bytes value = 3; // required; U256 repeated bytes factory_deps = 4; diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index f21d09290a2f..7059f1a74ea0 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -17,7 +17,7 @@ use crate::tests::mock_protocol_upgrade_transaction; fn execute(rng: &mut impl Rng) -> Execute { Execute { - contract_address: rng.gen(), + contract_address: Some(rng.gen()), value: rng.gen::().into(), calldata: (0..10 * 32).map(|_| rng.gen()).collect(), // TODO: find a way to generate valid random bytecode. 
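The protobuf mapping above reduces to an optional round-trip. The following standalone sketch shows the two directions; the helper names are hypothetical and the `zksync_basic_types` import path is an assumption (the real code uses the internal `parse_h160` shown in the diff). Note that the `.ok()` on the decode side silently turns a malformed address into `None` instead of surfacing an error.

use zksync_basic_types::{Address, H160};

// Encoding: an absent address is simply absent on the wire.
fn encode_contract_address(addr: Option<Address>) -> Option<Vec<u8>> {
    addr.map(|addr| addr.as_bytes().to_vec())
}

// Decoding: absent bytes (or, mirroring the `.ok()` above, malformed ones)
// become `None`.
fn decode_contract_address(wire: Option<&[u8]>) -> Option<Address> {
    wire.and_then(|bytes| (bytes.len() == 20).then(|| H160::from_slice(bytes)))
}
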
diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index 34cfde108f19..b4949dc101d6 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -13,7 +13,7 @@ use crate::{models::storage_transaction::StorageTransaction, BigDecimal}; fn default_execute() -> Execute { Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: U256::from(10i32), calldata: hex::decode( "a9059cbb00000000000000000000000058d595f318167d5af45d9e44ade4348dd4e\ diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index c17e8c5d1fe3..dc672fa1f807 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -74,7 +74,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), }; let mut l2_tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], zksync_types::Nonce(0), fee, @@ -110,7 +110,7 @@ pub(crate) fn mock_l1_execute() -> L1Tx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], @@ -138,7 +138,7 @@ pub(crate) fn mock_protocol_upgrade_transaction() -> ProtocolUpgradeTx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 49791f776e08..0a72289b48a4 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -58,7 +58,8 @@ impl TransactionsDal<'_, '_> { tx: &L1Tx, l1_block_number: L1BlockNumber, ) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.hash(); let tx_hash_bytes = tx_hash.as_bytes(); let json_data = serde_json::to_value(&tx.execute) @@ -143,7 +144,7 @@ impl TransactionsDal<'_, '_> { serial_id, full_fee, layer_2_tip_fee, - contract_address, + contract_address_as_bytes, l1_block_number.0 as i32, value, empty_address.as_bytes(), @@ -161,7 +162,8 @@ impl TransactionsDal<'_, '_> { } pub async fn insert_system_transaction(&mut self, tx: &ProtocolUpgradeTx) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes().to_vec(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.common_data.hash().0.to_vec(); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.common_data.hash())); @@ -238,7 +240,7 @@ impl TransactionsDal<'_, '_> { gas_per_pubdata_limit, json_data, upgrade_id, - contract_address, + contract_address_as_bytes, l1_block_number, value, &Address::default().0.to_vec(), @@ -284,7 +286,8 @@ impl TransactionsDal<'_, '_> { } let initiator_address = tx.initiator_account(); - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let json_data = serde_json::to_value(&tx.execute) .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())); let gas_limit = 
u256_to_big_decimal(tx.common_data.fee.gas_limit); @@ -413,7 +416,7 @@ impl TransactionsDal<'_, '_> { input_data, &json_data, tx_format, - contract_address, + contract_address_as_bytes, value, &paymaster, &paymaster_input, @@ -697,8 +700,10 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); l2_values.push(u256_to_big_decimal(transaction.execute.value)); - l2_contract_addresses.push(transaction.execute.contract_address.as_bytes()); + l2_contract_addresses.push(contract_address_as_bytes); l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]); l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes()); l2_hashes.push(tx_res.hash.as_bytes()); @@ -818,7 +823,7 @@ impl TransactionsDal<'_, '_> { &l2_inputs as &[&[u8]], &l2_datas, &l2_tx_formats, - &l2_contract_addresses as &[&[u8]], + &l2_contract_addresses as &[Option<Vec<u8>>], &l2_values, &l2_paymaster as &[&[u8]], &l2_paymaster_input as &[&[u8]], @@ -901,8 +906,10 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); l2_values.push(u256_to_big_decimal(transaction.execute.value)); - l2_contract_addresses.push(transaction.execute.contract_address.as_bytes()); + l2_contract_addresses.push(contract_address_as_bytes); l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]); l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes()); l2_hashes.push(tx_res.hash.as_bytes()); @@ -1013,7 +1020,7 @@ impl TransactionsDal<'_, '_> { &l2_datas, &l2_refunded_gas, &l2_values, - &l2_contract_addresses as &[&[u8]], + &l2_contract_addresses as &[Option<Vec<u8>>], &l2_paymaster as &[&[u8]], &l2_paymaster_input as &[&[u8]], l2_block_number.0 as i32, @@ -1083,6 +1090,8 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx = &tx_res.transaction; l1_hashes.push(tx_res.hash.as_bytes()); l1_initiator_address.push(common_data.sender.as_bytes()); @@ -1096,7 +1105,7 @@ impl TransactionsDal<'_, '_> { l1_priority_op_id.push(common_data.serial_id.0 as i64); l1_full_fee.push(u256_to_big_decimal(common_data.full_fee)); l1_layer_2_tip_fee.push(u256_to_big_decimal(common_data.layer_2_tip_fee)); - l1_contract_address.push(tx.execute.contract_address.as_bytes()); + l1_contract_address.push(contract_address_as_bytes); l1_l1_block_number.push(common_data.eth_block as i32); l1_value.push(u256_to_big_decimal(tx.execute.value)); l1_tx_format.push(common_data.tx_format() as i32); @@ -1203,7 +1212,7 @@ impl TransactionsDal<'_, '_> { &l1_priority_op_id, &l1_full_fee, &l1_layer_2_tip_fee, - &l1_contract_address as &[&[u8]], + &l1_contract_address as &[Option<Vec<u8>>], &l1_l1_block_number, &l1_value, &l1_tx_format, @@ -1373,6 +1382,8 @@ impl TransactionsDal<'_, '_> { .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err) })?; + let contract_address = transaction.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx = &tx_res.transaction;
upgrade_hashes.push(tx_res.hash.as_bytes()); upgrade_initiator_address.push(common_data.sender.as_bytes()); @@ -1385,7 +1396,7 @@ impl TransactionsDal<'_, '_> { .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())), ); upgrade_upgrade_id.push(common_data.upgrade_id as i32); - upgrade_contract_address.push(tx.execute.contract_address.as_bytes()); + upgrade_contract_address.push(contract_address_as_bytes); upgrade_l1_block_number.push(common_data.eth_block as i32); upgrade_value.push(u256_to_big_decimal(tx.execute.value)); upgrade_tx_format.push(common_data.tx_format() as i32); @@ -1484,7 +1495,7 @@ impl TransactionsDal<'_, '_> { &upgrade_gas_per_pubdata_limit, &upgrade_data, &upgrade_upgrade_id, - &upgrade_contract_address as &[&[u8]], + &upgrade_contract_address as &[Option<Vec<u8>>], &upgrade_l1_block_number, &upgrade_value, &upgrade_tx_format, diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 6ea1be3b514b..96ef600984f9 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -371,7 +371,7 @@ fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) -> Transaction { let mut txn = L2Tx::new( - Address::default(), + Some(Address::default()), Vec::new(), nonce, Fee::default(), @@ -386,7 +386,7 @@ fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { let execute = Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs index a29e1101d520..34c70e0f9c45 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs @@ -153,7 +153,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs index 788a52206e80..0285320daa30 100644 --- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_3_2::vm_with_bootloader::{ pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -170,7 +170,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -593,7 +593,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs index 0ec921450daf..9dfda9e1a68c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs index 1379b853a542..f7384da76d0d 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_4_1::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs index 6a57fd07ae71..b84e9d32126c 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs @@ -20,10 +20,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs index 3498e51ec308..38280aa80513 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_4_2::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs index 30a65097111d..637fd94c1c89 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs index ad740a279dcd..8bf575effe06 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_boojum_integration::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 15af9d868adc..a96045141380 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -147,7 +147,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index 0270ac35475b..a119a31618e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -21,7 +21,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index caea07617ddb..5e7b7748fb3a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -58,7 +58,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -82,7 +82,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -152,7 +152,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. 
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -220,7 +220,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index b7a2154bdc71..3f0a47b980e2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -18,7 +18,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 3fcef71add07..746e9be923f2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -65,7 +65,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -150,7 +150,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), @@ -197,7 +197,7 @@ fn get_used_contracts_with_out_of_gas_far_call() { let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: data.proxy_counter_address, + contract_address: Some(data.proxy_counter_address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 3b58565098d5..1abb1e39e19b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -176,7 +176,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index a374f63608bc..a43bb7c0309e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -37,7 +37,7 @@ fn get_l1_noop() -> Transaction { ..Default::default() }), execute: 
Execute { - contract_address: H160::zero(), + contract_address: Some(H160::zero()), calldata: vec![], value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 122b38601175..2ae43869d7f6 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -59,7 +59,7 @@ fn test_nonce_holder() { vm.reset_state(true); let mut transaction = account.get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index f77eeb4f126e..28d3ea82da31 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -28,7 +28,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -65,7 +65,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -95,7 +95,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![], value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 5ad6e3fa4f3d..1d276533898e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -181,7 +181,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -200,7 +200,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 68e49b202a93..bc0a07381b00 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -68,7 +68,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -125,7 +125,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index a677a61c6029..e7b3f2043385 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -83,7 +83,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -101,7 +101,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs index a61a0a2bd91c..55ca372c4a9f 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 7fe15ca7bcd2..8258e21366ce 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -30,7 +30,7 @@ fn test_storage(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: first_tx_calldata, value: 0.into(), factory_deps: vec![], @@ -40,7 +40,7 @@ fn test_storage(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: second_tx_calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 75144839006e..efa64ea17708 100644 ---
a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -24,7 +24,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 57877854031d..662e014ef85b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -70,7 +70,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -167,7 +167,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipeint_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -188,7 +188,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index dd25c2097405..ba4863f7c457 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -265,7 +265,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -315,7 +315,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs index 502be0dc22cc..2ec86eb3ceaf 100644 --- a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index bed348afd2d9..02c73344a543 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -164,7 +164,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index a4d0eb2d17e2..df7a78855426 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -34,7 +34,7 @@ fn test_max_depth() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -69,7 +69,7 @@ fn test_basic_behavior() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(increment_by_6_calldata).unwrap(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index 02ec2dc58aaa..35412ee4d1bd 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -22,7 +22,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 0708d67e27a3..b15ef7fde2bf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -69,7 +69,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. 
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -93,7 +93,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -169,7 +169,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -251,7 +251,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index 34e1e2d25f31..cc9aac5bb91b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -21,7 +21,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index a42037a7f5be..ef19717a627c 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -82,7 +82,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -208,7 +208,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecut let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index dcb1bff06d09..0fc12848227e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -173,7 +173,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git 
a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 661286ca9697..91d78c69a931 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -64,7 +64,7 @@ fn test_nonce_holder() { let mut transaction_data: TransactionData = account .get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 2ab40faf22ca..9388d0161846 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -31,7 +31,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -75,7 +75,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -112,7 +112,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: Vec::new(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index eb3104fd637a..8bf5e9919889 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -88,7 +88,7 @@ fn test_prestate_tracer_diff_mode() { //enter ether to contract to see difference in the balance post execution let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), + contract_address: Some(vm.test_contract.unwrap()), calldata: Default::default(), value: U256::from(100000), factory_deps: vec![], @@ -98,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); let tx1 = Execute { - contract_address: deployed_address2, + contract_address: Some(deployed_address2), calldata: Default::default(), value: U256::from(200000), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index ca058d672d2e..cc0085f20252 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -188,7 +188,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -207,7 +207,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 779e9b5c629d..cdd71354c8de 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -63,7 +63,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -120,7 +120,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 43e7baae3b2d..52e4d24bc0b4 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -92,7 +92,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -110,7 +110,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs index 6cc731a1387c..93be9506a3b0 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 0fe0b0220fae..126d174a6468 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -50,7 +50,7 @@ fn test_storage(txs: Vec) -> u32 { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 58c5ef77dc42..2db37881352f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -27,7 +27,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), 
calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 31f6c3291ef6..2c380623636a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -73,7 +73,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -169,7 +169,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipeint_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -190,7 +190,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 7c3ebff4a77d..d85a504de40f 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -276,7 +276,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -326,7 +326,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 502be0dc22cc..2ec86eb3ceaf 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 785eb49835f1..d7c0dfb9f6d0 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index 7ef739fd5bf5..b64e3f770185 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -22,7 +22,7 @@ const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option
<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -144,7 +144,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -479,7 +479,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index ecad7d911b40..4bd39bc56dd4 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index 99ce4671c29b..a8f80ea3255e 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -23,7 +23,7 @@ pub(crate) const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option
<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -171,7 +171,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -592,7 +592,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs index 0ea1669cf217..1ff6ce12557f 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 205090ba633e..22ab09296c91 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_refunds_enhancement::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs index 01ebe4c0d225..e51b8cab570e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs @@ -21,10 +21,8 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, + contract_address: Some(Default::default()), + ..Default::default() }, Some(Fee { gas_limit, diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs index b42950399f61..c96004163a65 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_virtual_blocks::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs index 05f08987a2d3..e8144c75db2e 100644 --- a/core/lib/types/src/l1/mod.rs +++ b/core/lib/types/src/l1/mod.rs @@ -274,7 +274,9 @@ impl From<L1Tx> for abi::NewPriorityRequest { transaction: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&t.common_data.sender), - to: address_to_u256(&t.execute.contract_address), + // Unwrap used here because the contract address should always be present for L1 transactions. + // TODO: Consider restricting the contract address to not be optional in L1Tx. + to: address_to_u256(&t.execute.contract_address.unwrap()), gas_limit: t.common_data.gas_limit, gas_per_pubdata_byte_limit: t.common_data.gas_per_pubdata_limit, max_fee_per_gas: t.common_data.max_fee_per_gas, @@ -345,7 +347,7 @@ impl TryFrom<abi::NewPriorityRequest> for L1Tx { }; let execute = Execute { - contract_address: u256_to_account_address(&req.transaction.to), + contract_address: Some(u256_to_account_address(&req.transaction.to)), calldata: req.transaction.data, factory_deps: req.factory_deps, value: req.transaction.value, diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 5a5276407529..036d2a7a036d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -153,7 +153,7 @@ pub struct L2Tx { impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new( - contract_address: Address, + contract_address: Option<Address>
, calldata: Vec<u8>, nonce: Nonce, fee: Fee, @@ -185,7 +185,7 @@ impl L2Tx { #[allow(clippy::too_many_arguments)] pub fn new_signed( - contract_address: Address, + contract_address: Option<Address>
, calldata: Vec<u8>, nonce: Nonce, fee: Fee, @@ -232,7 +232,7 @@ impl L2Tx { } /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option<Address>
{ self.execute.contract_address } @@ -324,7 +324,7 @@ impl From<L2Tx> for TransactionRequest { let mut base_tx_req = TransactionRequest { nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: tx.common_data.fee.max_fee_per_gas, max_priority_fee_per_gas: None, @@ -400,7 +400,7 @@ impl From<L2Tx> for api::Transaction { chain_id: U256::from(tx.common_data.extract_chain_id().unwrap_or_default()), nonce: U256::from(tx.common_data.nonce.0), from: Some(tx.common_data.initiator_address), - to: Some(tx.recipient_account()), + to: tx.recipient_account(), value: tx.execute.value, gas_price: Some(tx.common_data.fee.max_fee_per_gas), max_priority_fee_per_gas: Some(tx.common_data.fee.max_priority_fee_per_gas), diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 402e16afd435..86b2e3f03d51 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -104,7 +104,7 @@ impl Eq for Transaction {} impl Transaction { /// Returns recipient account of the transaction. - pub fn recipient_account(&self) -> Address { + pub fn recipient_account(&self) -> Option<Address>
{ self.execute.contract_address } @@ -253,7 +253,7 @@ impl TryFrom<Transaction> for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -284,7 +284,7 @@ impl TryFrom<Transaction> for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PROTOCOL_UPGRADE_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -377,7 +377,7 @@ impl TryFrom<abi::Transaction> for Transaction { unknown_type => anyhow::bail!("unknown tx type {unknown_type}"), }, execute: Execute { - contract_address: u256_to_account_address(&tx.to), + contract_address: Some(u256_to_account_address(&tx.to)), calldata: tx.data, factory_deps, value: tx.value, diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index c71e6e4206c5..5f26b1d6a6a5 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -108,8 +108,8 @@ impl CallRequestBuilder { } /// Set to address (None allowed for eth_estimateGas) - pub fn to(mut self, to: Address) -> Self { - self.call_request.to = Some(to); + pub fn to(mut self, to: Option<Address>
) -> Self { + self.call_request.to = to; self } @@ -817,10 +817,13 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; + // TODO: Remove this check when evm equivalence gets enabled + if value.to.is_none() { + return Err(SerializationTransactionError::ToAddressIsNull); + } + let mut tx = L2Tx::new( - value - .to - .ok_or(SerializationTransactionError::ToAddressIsNull)?, + value.to, value.input.0.clone(), nonce, fee, diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index 03762040a6b8..c133261bc232 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -15,7 +15,7 @@ use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ExecuteSerde { - contract_address: Address, + contract_address: Option
, #[serde(with = "ZeroPrefixHexSerde")] calldata: Vec, value: U256, @@ -25,7 +25,7 @@ struct ExecuteSerde { /// `Execute` transaction executes a previously deployed smart contract in the L2 rollup. #[derive(Clone, Default, PartialEq)] pub struct Execute { - pub contract_address: Address, + pub contract_address: Option
, pub calldata: Vec, pub value: U256, /// Factory dependencies: list of contract bytecodes associated with the deploy transaction. @@ -72,7 +72,9 @@ impl EIP712TypedStructure for Execute { const TYPE_NAME: &'static str = "Transaction"; fn build_structure(&self, builder: &mut BUILDER) { - builder.add_member("to", &U256::from(self.contract_address.as_bytes())); + if let Some(contract_address) = self.contract_address { + builder.add_member("to", &contract_address); + } builder.add_member("value", &self.value); builder.add_member("data", &self.calldata.as_slice()); // Factory deps are not included into the transaction signature, since they are parsed from the diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 35103779a49e..79c5a7330384 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -236,7 +236,7 @@ fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs index a0c55a557feb..07a87e3b676e 100644 --- a/core/node/consensus/src/registry/testonly.rs +++ b/core/node/consensus/src/registry/testonly.rs @@ -13,7 +13,7 @@ pub(crate) fn make_tx( ) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: *address, + contract_address: Some(*address), calldata: call.calldata().unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 11b6b5c67e3b..c93cafc09f9c 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -56,7 +56,7 @@ impl VM { call: abi::Call, ) -> ctx::Result { let tx = L2Tx::new( - *address, + Some(*address), call.calldata().context("call.calldata()")?, Nonce(0), Fee { diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 7ae3b5494e98..e6e343f50bca 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -147,7 +147,7 @@ impl EthClient for MockEthClient { fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { let tx = L1Tx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), @@ -178,7 +178,7 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { let tx = ProtocolUpgradeTx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 8256435f2f5b..d524d1a20dd7 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -396,7 +396,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: writes as usize, @@ -432,7 +432,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - 
contract_address: address, + contract_address: Some(address), calldata, value: Default::default(), factory_deps: vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 23aec8af49fb..edcf3ccc4f5c 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -131,7 +131,7 @@ pub fn fee(gas_limit: u32) -> Fee { pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -143,7 +143,7 @@ pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { pub fn l1_transaction(account: &mut Account, serial_id: PriorityOpId) -> Transaction { account.get_l1_tx( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index acb65bf1634d..b9984b782111 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -138,7 +138,7 @@ pub fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; let mut tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 9fe9e99e92c8..53bef106a8f4 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -202,7 +202,7 @@ pub fn create_l2_transaction( }; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index 161d156a53e9..67e877ae8efb 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -145,7 +145,7 @@ where let mut factory_deps = self.factory_deps.clone().unwrap_or_default(); factory_deps.push(bytecode); let l2_tx = L2Tx::new( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), Execute::encode_deploy_params_create(Default::default(), main_contract_hash, calldata), Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/execute_contract.rs b/core/tests/loadnext/src/sdk/operations/execute_contract.rs index d5fe57c7b79f..627e889ed012 100644 --- a/core/tests/loadnext/src/sdk/operations/execute_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/execute_contract.rs @@ -144,7 +144,7 @@ where .unwrap_or_default(); let execute = L2Tx::new( - contract_address, + Some(contract_address), calldata, Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/transfer.rs b/core/tests/loadnext/src/sdk/operations/transfer.rs index 94ee3aeb6082..651fabeb788b 100644 --- a/core/tests/loadnext/src/sdk/operations/transfer.rs +++ b/core/tests/loadnext/src/sdk/operations/transfer.rs @@ -153,7 +153,7 @@ where let tx = if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { // ETH estimate Execute { - contract_address: to, + contract_address: Some(to), calldata: Default::default(), factory_deps: vec![], 
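// A minimal sketch (not part of the patch) of the calling convention the hunks
// above establish, assuming only `zksync_types::{Address, Execute, U256}`.
// `transfer_execute` and `encode_l1_target` are illustrative helpers, not
// functions from this codebase.
use zksync_types::{Address, Execute, U256};

fn transfer_execute(to: Address, amount: U256) -> Execute {
    Execute {
        // Ordinary calls and transfers still name a target, now wrapped in
        // `Some`; `None` is left free for deployment-style transactions.
        contract_address: Some(to),
        calldata: vec![],
        value: amount,
        factory_deps: vec![],
    }
}

fn encode_l1_target(execute: &Execute) -> Address {
    // Where a concrete address is required (e.g. ABI-encoding for L1), a
    // missing target degrades to the zero address, mirroring the
    // `unwrap_or_default()` calls introduced above.
    execute.contract_address.unwrap_or_default()
}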
value: amount, @@ -161,7 +161,7 @@ where } else { // ERC-20 estimate Execute { - contract_address: token, + contract_address: Some(token), calldata: create_transfer_calldata(to, amount), factory_deps: vec![], value: Default::default(), diff --git a/core/tests/loadnext/src/sdk/signer.rs index 0f4b1cf29717..6f98f674ed95 100644 --- a/core/tests/loadnext/src/sdk/signer.rs +++ b/core/tests/loadnext/src/sdk/signer.rs @@ -51,7 +51,7 @@ impl<S: EthereumSigner> Signer<S> { // Sign Ether transfer if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { let mut transfer = L2Tx::new( - to, + Some(to), Default::default(), nonce, fee, @@ -73,7 +73,7 @@ impl<S: EthereumSigner> Signer<S> { // Sign ERC-20 transfer let data = create_transfer_calldata(to, amount); let mut transfer = L2Tx::new( - token, + Some(token), data, nonce, fee, @@ -122,7 +122,7 @@ impl<S: EthereumSigner> Signer<S> { paymaster_params: PaymasterParams, ) -> Result<L2Tx, SignerError> { let mut execute_contract = L2Tx::new( - contract, + Some(contract), calldata, nonce, fee, diff --git a/core/tests/test_account/src/lib.rs index 28e3d609e63d..d0c97abab729 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -129,7 +129,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps, value: U256::zero(), @@ -158,7 +158,7 @@ impl Account { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), - to: address_to_u256(&execute.contract_address), + to: address_to_u256(&execute.contract_address.unwrap_or_default()), gas_limit, gas_per_pubdata_byte_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), max_fee_per_gas, @@ -216,7 +216,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: value.unwrap_or_default(), factory_deps: vec![], @@ -235,7 +235,7 @@ impl Account { ) -> Transaction { let calldata = params.to_bytes(); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: U256::zero(), factory_deps: vec![], diff --git a/core/tests/vm-benchmark/src/transaction.rs index 90e1c6360b81..d5fedfa4df94 100644 --- a/core/tests/vm-benchmark/src/transaction.rs +++ b/core/tests/vm-benchmark/src/transaction.rs @@ -47,7 +47,7 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) -> .collect(); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), calldata, Nonce(nonce), tx_fee(gas_limit), @@ -76,7 +76,7 @@ fn tx_fee(gas_limit: u32) -> Fee { pub fn get_transfer_tx(nonce: u32) -> Transaction { let mut signed = L2Tx::new_signed( - PRIVATE_KEY.address(), + Some(PRIVATE_KEY.address()), vec![], // calldata Nonce(nonce), tx_fee(1_000_000), @@ -109,7 +109,7 @@ pub fn get_load_test_deploy_tx() -> Transaction { factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone()); let mut signed = L2Tx::new_signed( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), create_calldata, Nonce(0), tx_fee(100_000_000), @@ -147,7 +147,7 @@ pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> T .expect("cannot encode `execute` inputs"); let mut signed = L2Tx::new_signed( - *LOAD_TEST_CONTRACT_ADDRESS, + Some(*LOAD_TEST_CONTRACT_ADDRESS), calldata,
Nonce(nonce), tx_fee(gas_limit), From 36e534091f73f4e3ce86e322fb20842cda6a6b61 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 19 Sep 2024 12:16:14 +0300 Subject: [PATCH 098/116] feat(prover): WitnessGenerator refactoring #2 (#2899) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Introduce WitnessGenerator trait Rename some methods ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .../bin/witness_generator/src/artifacts.rs | 24 +-- .../src/basic_circuits/artifacts.rs | 30 ++- .../src/basic_circuits/job_processor.rs | 57 +++-- .../src/basic_circuits/mod.rs | 129 ++++++------ .../src/leaf_aggregation/artifacts.rs | 46 ++--- .../src/leaf_aggregation/job_processor.rs | 25 ++- .../src/leaf_aggregation/mod.rs | 140 +++++++------ .../crates/bin/witness_generator/src/lib.rs | 1 + .../src/node_aggregation/artifacts.rs | 42 ++-- .../src/node_aggregation/job_processor.rs | 25 ++- .../src/node_aggregation/mod.rs | 95 +++++---- .../src/recursion_tip/artifacts.rs | 24 +-- .../src/recursion_tip/job_processor.rs | 20 +- .../src/recursion_tip/mod.rs | 194 ++++++++++-------- .../src/scheduler/artifacts.rs | 24 +-- .../src/scheduler/job_processor.rs | 23 ++- .../witness_generator/src/scheduler/mod.rs | 140 +++++++------ .../crates/bin/witness_generator/src/utils.rs | 22 -- .../src/witness_generator.rs | 25 +++ .../bin/witness_generator/tests/basic_test.rs | 46 +++-- 20 files changed, 602 insertions(+), 530 deletions(-) create mode 100644 prover/crates/bin/witness_generator/src/witness_generator.rs diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs index f509d3b2f64a..7c444da047b2 100644 --- a/prover/crates/bin/witness_generator/src/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/artifacts.rs @@ -6,45 +6,33 @@ use zksync_prover_dal::{ConnectionPool, Prover}; #[derive(Debug)] pub(crate) struct AggregationBlobUrls { - pub aggregations_urls: String, + pub aggregation_urls: String, pub circuit_ids_and_urls: Vec<(u8, String)>, } -#[derive(Debug)] -pub(crate) struct SchedulerBlobUrls { - pub circuit_ids_and_urls: Vec<(u8, String)>, - pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>, - pub scheduler_witness_url: String, -} -pub(crate) enum BlobUrls { - Url(String), - Aggregation(AggregationBlobUrls), - Scheduler(SchedulerBlobUrls), -} - #[async_trait] pub(crate) trait ArtifactsManager { type InputMetadata; type InputArtifacts; type OutputArtifacts; + type BlobUrls; async fn get_artifacts( metadata: &Self::InputMetadata, object_store: &dyn ObjectStore, ) -> anyhow::Result<Self::InputArtifacts>; - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls; + ) -> Self::BlobUrls; - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: Self::BlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()>; } diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs index 3447659f8296..aa85d185e66b 100644 ---
a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, utils::SchedulerPartialInputWrapper, }; @@ -18,6 +18,7 @@ impl ArtifactsManager for BasicWitnessGenerator { type InputMetadata = L1BatchNumber; type InputArtifacts = BasicWitnessGeneratorJob; type OutputArtifacts = BasicCircuitArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -31,38 +32,31 @@ impl ArtifactsManager for BasicWitnessGenerator { }) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); object_store .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) .await .unwrap(); let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness); - let url = object_store + object_store .put(L1BatchNumber(job_id), &wrapper) .await - .unwrap(); - - BlobUrls::Url(url) + .unwrap() } #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, - _artifacts: Self::OutputArtifacts, + blob_urls: String, + artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_urls = match blob_urls { - BlobUrls::Scheduler(blobs) => blobs, - _ => unreachable!(), - }; - let mut connection = connection_pool .connection() .await @@ -79,7 +73,7 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_prover_jobs_dal() .insert_prover_jobs( L1BatchNumber(job_id), - blob_urls.circuit_ids_and_urls, + artifacts.circuit_urls, AggregationRound::BasicCircuits, 0, protocol_version_id, @@ -89,8 +83,8 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_witness_generator_dal() .create_aggregation_jobs( L1BatchNumber(job_id), - &blob_urls.closed_form_inputs_and_urls, - &blob_urls.scheduler_witness_url, + &artifacts.queue_urls, + &blob_urls, get_recursive_layer_circuit_id_for_base_layer, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs index 08732689e3a6..50e747b1ce1b 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs @@ -4,13 +4,15 @@ use anyhow::Context as _; use tracing::Instrument; use zksync_prover_dal::ProverDal; use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper}; +use zksync_prover_keystore::keystore::Keystore; use zksync_queued_job_processor::{async_trait, JobProcessor}; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls}, + artifacts::ArtifactsManager, basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, metrics::WITNESS_GENERATOR_METRICS, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -35,19 +37,15 @@ impl JobProcessor for
BasicWitnessGenerator { ) .await { - Some(block_number) => { - tracing::info!( - "Processing FRI basic witness-gen for block {}", - block_number - ); - let started_at = Instant::now(); - let job = Self::get_artifacts(&block_number, &*self.object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(Some((block_number, job))) - } + Some(block_number) => Ok(Some(( + block_number, + <Self as WitnessGenerator>::prepare_job( + block_number, + &*self.object_store, + Keystore::locate(), // todo: this should be removed + ) + .await?, + ))), None => Ok(None), } } @@ -73,11 +71,15 @@ impl JobProcessor for BasicWitnessGenerator { let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, + <Self as WitnessGenerator>::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, ) + .instrument(tracing::info_span!("basic_circuit", %block_number)) + .await + .map(Some) }) } @@ -92,8 +94,6 @@ impl JobProcessor for BasicWitnessGenerator { None => Ok(()), Some(artifacts) => { let blob_started_at = Instant::now(); - let circuit_urls = artifacts.circuit_urls.clone(); - let queue_urls = artifacts.queue_urls.clone(); let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); @@ -105,26 +105,17 @@ impl JobProcessor for BasicWitnessGenerator { .unwrap(); } - let scheduler_witness_url = - match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store) - .await - { - BlobUrls::Url(url) => url, - _ => unreachable!(), - }; + let blob_urls = + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] .observe(blob_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, - BlobUrls::Scheduler(SchedulerBlobUrls { - circuit_ids_and_urls: circuit_urls, - closed_form_inputs_and_urls: queue_urls, - scheduler_witness_url, - }), + blob_urls, artifacts, ) .await?; diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs index c9755c333dad..e76ef180c526 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs +++ b/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs @@ -5,6 +5,7 @@ use std::{ time::Instant, }; +use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, @@ -35,12 +36,14 @@ use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use zksync_prover_interface::inputs::WitnessInputData; +use zksync_prover_keystore::keystore::Keystore; use zksync_system_constants::BOOTLOADER_ADDRESS; use zksync_types::{ basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, }; use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, storage_oracle::StorageOracle, @@ -49,6 +52,7 @@ use crate::{ ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE, },
witness::WitnessStorage, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -108,17 +112,24 @@ impl BasicWitnessGenerator { protocol_version, } } +} + +#[async_trait] +impl WitnessGenerator for BasicWitnessGenerator { + type Job = BasicWitnessGeneratorJob; + type Metadata = L1BatchNumber; + type Artifacts = BasicCircuitArtifacts; - async fn process_job_impl( + async fn process_job( + job: BasicWitnessGeneratorJob, object_store: Arc<dyn ObjectStore>, - basic_job: BasicWitnessGeneratorJob, + max_circuits_in_flight: Option<usize>, started_at: Instant, - max_circuits_in_flight: usize, - ) -> Option<BasicCircuitArtifacts> { + ) -> anyhow::Result<BasicCircuitArtifacts> { let BasicWitnessGeneratorJob { block_number, data: job, - } = basic_job; + } = job; tracing::info!( "Starting witness generation of type {:?} for block {}", @@ -126,65 +137,43 @@ impl BasicWitnessGenerator { block_number.0 ); - Some( - process_basic_circuits_job( - object_store, - started_at, - block_number, - job, - max_circuits_in_flight, - ) - .await, + let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = generate_witness( + block_number, + object_store, + job, + max_circuits_in_flight.unwrap(), ) + .await; + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + tracing::info!( + "Witness generation for block {} is complete in {:?}", + block_number.0, + started_at.elapsed() + ); + + Ok(BasicCircuitArtifacts { + circuit_urls, + queue_urls, + scheduler_witness, + aux_output_witness, + }) } -} -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -pub(super) async fn process_basic_circuits_job( - object_store: Arc<dyn ObjectStore>, - started_at: Instant, - block_number: L1BatchNumber, - job: WitnessInputData, - max_circuits_in_flight: usize, -) -> BasicCircuitArtifacts { - let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = - generate_witness(block_number, object_store, job, max_circuits_in_flight).await; - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - tracing::info!( - "Witness generation for block {} is complete in {:?}", - block_number.0, - started_at.elapsed() - ); + async fn prepare_job( + metadata: L1BatchNumber, + object_store: &dyn ObjectStore, + _keystore: Keystore, + ) -> anyhow::Result<BasicWitnessGeneratorJob> { + tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0); + let started_at = Instant::now(); + let job = Self::get_artifacts(&metadata, object_store).await?; - BasicCircuitArtifacts { - circuit_urls, - queue_urls, - scheduler_witness, - aux_output_witness, - } -} + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); -#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] -async fn save_recursion_queue( - block_number: L1BatchNumber, - circuit_id: u8, - recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>, - closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, - object_store: Arc<dyn ObjectStore>, -) -> (u8, String, usize) { - let key = ClosedFormInputKey { - block_number, - circuit_id, - }; - let basic_circuit_count = closed_form_inputs.len(); - let closed_form_inputs = closed_form_inputs - .iter() - .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) - .collect(); - let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); - let blob_url = object_store.put(key, &wrapper).await.unwrap(); - (circuit_id, blob_url, basic_circuit_count) + Ok(job) + } } #[tracing::instrument(skip_all,
fields(l1_batch = %block_number))] @@ -464,3 +453,25 @@ async fn generate_witness( block_aux_witness, ) } + +#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] +async fn save_recursion_queue( + block_number: L1BatchNumber, + circuit_id: u8, + recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>, + closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, + object_store: Arc<dyn ObjectStore>, +) -> (u8, String, usize) { + let key = ClosedFormInputKey { + block_number, + circuit_id, + }; + let basic_circuit_count = closed_form_inputs.len(); + let closed_form_inputs = closed_form_inputs + .iter() + .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) + .collect(); + let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); + let blob_url = object_store.put(key, &wrapper).await.unwrap(); + (circuit_id, blob_url, basic_circuit_count) +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs index a94587d00ec6..c83997e36b80 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs @@ -3,15 +3,15 @@ use std::time::Instant; use async_trait::async_trait; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_fri_types::keys::{AggregationsKey, ClosedFormInputKey}; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, metrics::WITNESS_GENERATOR_METRICS, - utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper}, + utils::{AggregationWrapper, ClosedFormInputWrapper}, }; #[async_trait] impl ArtifactsManager for LeafAggregationWitnessGenerator { type InputMetadata = LeafAggregationJobMetadata; type InputArtifacts = ClosedFormInputWrapper; type OutputArtifacts = LeafAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -41,38 +42,40 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), - 0, - artifacts.aggregations, - object_store, - ) - .await; + let key = AggregationsKey { + block_number: artifacts.block_number, + circuit_id: get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + depth: 0, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.aggregations)) + .await + .unwrap(); + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls:
artifacts.circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = %job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { tracing::info!( @@ -82,11 +85,6 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { artifacts.circuit_id, ); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blob_urls) => blob_urls, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await.unwrap(); let mut transaction = prover_connection.start_transaction().await.unwrap(); let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); @@ -124,7 +122,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), number_of_dependent_jobs, 0, - blob_urls.aggregations_urls, + blob_urls.aggregation_urls, ) .await; tracing::info!( diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs index e032084151eb..440636b85fae 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs @@ -10,10 +10,11 @@ use crate::{ artifacts::ArtifactsManager, leaf_aggregation::{ - prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator, + LeafAggregationArtifacts, LeafAggregationWitnessGenerator, LeafAggregationWitnessGeneratorJob, }, metrics::WITNESS_GENERATOR_METRICS, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -37,9 +38,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { tracing::info!("Processing leaf aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_leaf_aggregation_job()")?, + <Self as WitnessGenerator>::prepare_job( + metadata, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_leaf_aggregation_job()")?, ))) } @@ -63,7 +68,13 @@ impl JobProcessor for LeafAggregationWitnessGenerator { let object_store = self.object_store.clone(); let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + <Self as WitnessGenerator>::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, + ) + .await }) } @@ -83,7 +94,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { let blob_save_started_at = Instant::now(); - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] .observe(blob_save_started_at.elapsed()); @@ -93,7 +104,7 @@ impl JobProcessor for LeafAggregationWitnessGenerator { block_number.0, circuit_id, ); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id, started_at, diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs index d669a4cc97e3..960843259c32 100644 ---
a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; use tokio::sync::Semaphore; use zkevm_test_harness::{ @@ -36,6 +37,7 @@ use crate::{ load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, ClosedFormInputWrapper, }, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -85,69 +87,6 @@ impl LeafAggregationWitnessGenerator { keystore, } } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %leaf_job.block_number, circuit_id = %leaf_job.circuit_id) - )] - pub async fn process_job_impl( - leaf_job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - object_store: Arc<dyn ObjectStore>, - max_circuits_in_flight: usize, - ) -> LeafAggregationArtifacts { - tracing::info!( - "Starting witness generation of type {:?} for block {} with circuit {}", - AggregationRound::LeafAggregation, - leaf_job.block_number.0, - leaf_job.circuit_id, - ); - process_leaf_aggregation_job(started_at, leaf_job, object_store, max_circuits_in_flight) - .await - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -pub async fn prepare_leaf_aggregation_job( - metadata: LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<LeafAggregationWitnessGeneratorJob> { - let started_at = Instant::now(); - let closed_form_input = - LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let base_vk = keystore - .load_base_layer_verification_key(metadata.circuit_id) - .context("get_base_layer_vk_for_circuit_type()")?; - - let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( - BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), - ) as u8; - - let leaf_vk = keystore - .load_recursive_layer_verification_key(leaf_circuit_id) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - Ok(LeafAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - closed_form_inputs: closed_form_input, - proofs_ids: metadata.prover_job_ids_for_proofs, - base_vk, - leaf_params, - }) } #[tracing::instrument( skip_all, fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) )] pub async fn process_leaf_aggregation_job( closed_form_inputs: job.closed_form_inputs.0, } } + +#[async_trait] +impl WitnessGenerator for LeafAggregationWitnessGenerator { + type Job = LeafAggregationWitnessGeneratorJob; + type Metadata = LeafAggregationJobMetadata; + type Artifacts = LeafAggregationArtifacts; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + )] + async fn process_job( + job: LeafAggregationWitnessGeneratorJob, + object_store: Arc<dyn ObjectStore>, + max_circuits_in_flight: Option<usize>, + started_at: Instant, + ) -> anyhow::Result<LeafAggregationArtifacts> { + tracing::info!( + "Starting witness generation of type {:?} for block {} with circuit {}", + AggregationRound::LeafAggregation, + job.block_number.0,
job.circuit_id, + ); + Ok(process_leaf_aggregation_job( + started_at, + job, + object_store, + max_circuits_in_flight.unwrap(), + ) + .await) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + )] + async fn prepare_job( + metadata: LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<LeafAggregationWitnessGeneratorJob> { + let started_at = Instant::now(); + let closed_form_input = + LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let base_vk = keystore + .load_base_layer_verification_key(metadata.circuit_id) + .context("get_base_layer_vk_for_circuit_type()")?; + + let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), + ) as u8; + + let leaf_vk = keystore + .load_recursive_layer_verification_key(leaf_circuit_id) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + Ok(LeafAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + closed_form_inputs: closed_form_input, + proofs_ids: metadata.prover_job_ids_for_proofs, + base_vk, + leaf_params, + }) + } +} diff --git a/prover/crates/bin/witness_generator/src/lib.rs index c0ac9718c6ee..b24b548a49ba 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -14,3 +14,4 @@ mod storage_oracle; mod tests; pub mod utils; mod witness; +pub mod witness_generator; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs index 245027f0d677..09f01899bf3c 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs @@ -7,10 +7,10 @@ use zksync_prover_fri_types::keys::AggregationsKey; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, metrics::WITNESS_GENERATOR_METRICS, node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, - utils::{save_node_aggregations_artifacts, AggregationWrapper}, + utils::AggregationWrapper, }; #[async_trait] impl ArtifactsManager for NodeAggregationWitnessGenerator { type InputMetadata = NodeAggregationJobMetadata; type InputArtifacts = AggregationWrapper; type OutputArtifacts = NodeAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; #[tracing::instrument( skip_all, fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) )] async fn get_artifacts( metadata: &Self::InputMetadata, @@ -46,43 +47,43 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls =
save_node_aggregations_artifacts( - artifacts.block_number, - artifacts.circuit_id, - artifacts.depth, - artifacts.next_aggregations, - object_store, - ) - .await; + let key = AggregationsKey { + block_number: artifacts.block_number, + circuit_id: artifacts.circuit_id, + depth: artifacts.depth, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.next_aggregations)) + .await + .unwrap(); WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = % job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { let mut prover_connection = connection_pool.connection().await.unwrap(); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blobs) => blobs, - _ => unreachable!(), - }; let mut transaction = prover_connection.start_transaction().await.unwrap(); let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); let protocol_version_id = transaction @@ -111,7 +109,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { artifacts.circuit_id, Some(dependent_jobs as i32), artifacts.depth, - &blob_urls.aggregations_urls, + &blob_urls.aggregation_urls, protocol_version_id, ) .await; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs index a015462cd6fe..0f66c988c10d 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs @@ -11,9 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, node_aggregation::{ - prepare_job, NodeAggregationArtifacts, NodeAggregationWitnessGenerator, + NodeAggregationArtifacts, NodeAggregationWitnessGenerator, NodeAggregationWitnessGeneratorJob, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -37,9 +38,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator { tracing::info!("Processing node aggregation job {:?}", metadata.id); Ok(Some(( metadata.id, - prepare_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_job()")?, + <Self as WitnessGenerator>::prepare_job( + metadata, + &*self.object_store, + self.keystore.clone(), + ) + .await + .context("prepare_job()")?, ))) } @@ -63,7 +68,13 @@ impl JobProcessor for NodeAggregationWitnessGenerator { let object_store = self.object_store.clone(); let max_circuits_in_flight = self.config.max_circuits_in_flight; tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) + <Self as WitnessGenerator>::process_job( + job, + object_store, + Some(max_circuits_in_flight), + started_at, + ) + .await }) } @@ -79,12 +90,12 @@ impl JobProcessor for NodeAggregationWitnessGenerator { ) -> anyhow::Result<()> { let blob_save_started_at = Instant::now(); - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; + let blob_urls = Self::save_to_bucket(job_id, artifacts.clone(), &*self.object_store).await;
WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id, started_at, diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs index 047caa363a89..f2c9a6fb8919 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ @@ -30,6 +31,7 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -81,17 +83,24 @@ impl NodeAggregationWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for NodeAggregationWitnessGenerator { + type Job = NodeAggregationWitnessGeneratorJob; + type Metadata = NodeAggregationJobMetadata; + type Artifacts = NodeAggregationArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id) )] - pub async fn process_job_impl( + async fn process_job( job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, object_store: Arc<dyn ObjectStore>, - max_circuits_in_flight: usize, - ) -> NodeAggregationArtifacts { + max_circuits_in_flight: Option<usize>, + started_at: Instant, + ) -> anyhow::Result<NodeAggregationArtifacts> { let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone()); tracing::info!( "Starting witness generation of type {:?} for block {} circuit id {} depth {}", @@ -117,7 +126,7 @@ impl NodeAggregationWitnessGenerator { proofs_ids.len() ); - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight.unwrap())); let mut handles = vec![]; for (circuit_idx, (chunk, proofs_ids_for_chunk)) in job @@ -205,52 +214,54 @@ impl NodeAggregationWitnessGenerator { started_at.elapsed(), ); - NodeAggregationArtifacts { + Ok(NodeAggregationArtifacts { circuit_id: job.circuit_id, block_number: job.block_number, depth: job.depth + 1, next_aggregations, recursive_circuit_ids_and_urls, - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) -)] -pub async fn prepare_job( - metadata: NodeAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<NodeAggregationWitnessGeneratorJob> { - let started_at = Instant::now(); - let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + #[tracing::instrument( skip_all, fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) )] + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<NodeAggregationWitnessGeneratorJob> { + let started_at = Instant::now(); + let artifacts = + NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); - let
started_at = Instant::now(); - let leaf_vk = keystore - .load_recursive_layer_verification_key(metadata.circuit_id) - .context("get_recursive_layer_vk_for_circuit_type")?; - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + let started_at = Instant::now(); + let leaf_vk = keystore + .load_recursive_layer_verification_key(metadata.circuit_id) + .context("get_recursive_layer_vk_for_circuit_type")?; + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); - Ok(NodeAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - depth: metadata.depth, - aggregations: artifacts.0, - proofs_ids: metadata.prover_job_ids_for_proofs, - leaf_vk, - node_vk, - all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?, - }) + Ok(NodeAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + depth: metadata.depth, + aggregations: artifacts.0, + proofs_ids: metadata.prover_job_ids_for_proofs, + leaf_vk, + node_vk, + all_leafs_layer_params: get_leaf_vk_params(&keystore) + .context("get_leaf_vk_params()")?, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs index 8379fcf9f933..b61aa948100b 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs @@ -12,7 +12,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, }; @@ -21,6 +21,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { type InputMetadata = Vec<(u8, u32)>; type InputArtifacts = Vec<ZkSyncRecursionLayerProof>; type OutputArtifacts = RecursionTipArtifacts; + type BlobUrls = String; /// Loads all proofs for a given recursion tip's job ids. /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them).
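// The new `witness_generator.rs` module is added by this patch but its body is
// not shown in this excerpt. Inferred from the four impls in this diff, the
// trait presumably looks roughly like the following sketch; the signatures are
// reconstructed from the call sites, not copied from the source, and the
// `Send + 'static` bound on `Job` is an assumption:
use std::{sync::Arc, time::Instant};

use async_trait::async_trait;
use zksync_object_store::ObjectStore;
use zksync_prover_keystore::keystore::Keystore;

#[async_trait]
pub trait WitnessGenerator {
    type Job: Send + 'static;
    type Metadata;
    type Artifacts;

    // Rounds that fan out circuits (basic, leaf, node) read
    // `max_circuits_in_flight`; the recursion tip ignores it and passes `None`.
    async fn process_job(
        job: Self::Job,
        object_store: Arc<dyn ObjectStore>,
        max_circuits_in_flight: Option<usize>,
        started_at: Instant,
    ) -> anyhow::Result<Self::Artifacts>;

    async fn prepare_job(
        metadata: Self::Metadata,
        object_store: &dyn ObjectStore,
        keystore: Keystore,
    ) -> anyhow::Result<Self::Job>;
}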
@@ -73,11 +74,11 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { Ok(proofs) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 255, @@ -86,29 +87,22 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { aggregation_round: AggregationRound::RecursionTip, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -123,7 +117,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { 0, 0, AggregationRound::RecursionTip, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs index f114724cfec4..9ab7d934a3e7 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs @@ -11,9 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, recursion_tip::{ - prepare_job, RecursionTipArtifacts, RecursionTipWitnessGenerator, + RecursionTipArtifacts, RecursionTipJobMetadata, RecursionTipWitnessGenerator, RecursionTipWitnessGeneratorJob, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -49,9 +50,11 @@ impl JobProcessor for RecursionTipWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job( - l1_batch_number, - final_node_proof_job_ids, + <Self as WitnessGenerator>::prepare_job( + RecursionTipJobMetadata { + l1_batch_number, + final_node_proof_job_ids, + }, &*self.object_store, self.keystore.clone(), ) @@ -77,7 +80,10 @@ impl JobProcessor for RecursionTipWitnessGenerator { job: RecursionTipWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle<anyhow::Result<RecursionTipArtifacts>> { - tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) + let object_store = self.object_store.clone(); + tokio::spawn(async move { + <Self as WitnessGenerator>::process_job(job, object_store, None, started_at).await + }) } #[tracing::instrument( @@ -93,12 +99,12 @@ impl JobProcessor for RecursionTipWitnessGenerator { let blob_save_started_at = Instant::now(); let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs index 4abb56a7d788..40abb756c8a5 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs +++
b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs @@ -1,6 +1,7 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context; +use async_trait::async_trait; use circuit_definitions::{ circuit_definitions::recursion_layer::{ recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, @@ -45,6 +46,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, + witness_generator::WitnessGenerator, }; mod artifacts; @@ -66,6 +68,11 @@ pub struct RecursionTipArtifacts { pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, } +pub struct RecursionTipJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub final_node_proof_job_ids: Vec<(u8, u32)>, +} + #[derive(Debug)] pub struct RecursionTipWitnessGenerator { config: FriWitnessGeneratorConfig, @@ -91,15 +98,24 @@ impl RecursionTipWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for RecursionTipWitnessGenerator { + type Job = RecursionTipWitnessGeneratorJob; + type Metadata = RecursionTipJobMetadata; + type Artifacts = RecursionTipArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = %job.block_number) )] - pub fn process_job_sync( - job: RecursionTipWitnessGeneratorJob, + async fn process_job( + job: Self::Job, + _object_store: Arc<dyn ObjectStore>, + _max_circuits_in_flight: Option<usize>, started_at: Instant, - ) -> RecursionTipArtifacts { + ) -> anyhow::Result<RecursionTipArtifacts> { tracing::info!( "Starting fri witness generation of type {:?} for block {}", AggregationRound::RecursionTip, @@ -127,100 +143,102 @@ impl RecursionTipWitnessGenerator { started_at.elapsed() ); - RecursionTipArtifacts { + Ok(RecursionTipArtifacts { recursion_tip_circuit: ZkSyncRecursiveLayerCircuit::RecursionTipCircuit( recursive_tip_circuit, ), - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - final_node_proof_job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<RecursionTipWitnessGeneratorJob> { - let started_at = Instant::now(); - let recursion_tip_proofs = - RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) - .await?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: RecursionTipJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<RecursionTipWitnessGeneratorJob> { + let started_at = Instant::now(); + let recursion_tip_proofs = RecursionTipWitnessGenerator::get_artifacts( + &metadata.final_node_proof_job_ids, + object_store, ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + .await?; + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + + let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let mut recursion_queues = vec![]; + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + let key = ClosedFormInputKey { + block_number: metadata.l1_batch_number, + circuit_id, + }; + let ClosedFormInputWrapper(_,
recursion_queue) = object_store.get(key).await?; + recursion_queues.push((circuit_id, recursion_queue)); + } + + // RECURSION_TIP_ARITY is the maximum amount of proof that a single recursion tip can support. + // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up. + assert!( + RECURSION_TIP_ARITY >= recursion_queues.len(), + "recursion tip received more circuits ({}) than supported ({})", + recursion_queues.len(), + RECURSION_TIP_ARITY + ); + let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; + let mut queue_set: [_; RECURSION_TIP_ARITY] = + std::array::from_fn(|_| QueueState::placeholder_witness()); + + for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { + branch_circuit_type_set[index] = + GoldilocksField::from_u64_unchecked(*circuit_id as u64); + queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); + } - let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + assert_eq!( + leaf_vk_commits.len(), + 16, + "expected 16 leaf vk commits, which corresponds to the number of circuits, got {}", + leaf_vk_commits.len() + ); + let leaf_layer_parameters: [RecursionLeafParametersWitness<GoldilocksField>; 16] = + leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + + let input = RecursionTipInputWitness { + leaf_layer_parameters, + node_layer_vk_commitment, + branch_circuit_type_set, + queue_set, + }; - let mut recursion_queues = vec![]; - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - let key = ClosedFormInputKey { - block_number: l1_batch_number, - circuit_id, + let recursion_tip_witness = RecursionTipInstanceWitness { + input, + vk_witness: node_vk.clone().into_inner(), + proof_witnesses: recursion_tip_proofs.into(), }; - let ClosedFormInputWrapper(_, recursion_queue) = object_store.get(key).await?; - recursion_queues.push((circuit_id, recursion_queue)); - } - // RECURSION_TIP_ARITY is the maximum amount of proof that a single recursion tip can support. - // Given recursion_tip has at most 1 proof per circuit, it implies we can't add more circuit types without bumping arity up.
- assert!( - RECURSION_TIP_ARITY >= recursion_queues.len(), - "recursion tip received more circuits ({}) than supported ({})", - recursion_queues.len(), - RECURSION_TIP_ARITY - ); - let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; - let mut queue_set: [_; RECURSION_TIP_ARITY] = - std::array::from_fn(|_| QueueState::placeholder_witness()); - - for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { - branch_circuit_type_set[index] = GoldilocksField::from_u64_unchecked(*circuit_id as u64); - queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); - } + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - assert_eq!( - leaf_vk_commits.len(), - 16, - "expected 16 leaf vk commits, which corresponds to the number of circuits, got {}", - leaf_vk_commits.len() - ); - let leaf_layer_parameters: [RecursionLeafParametersWitness<GoldilocksField>; 16] = - leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::<Vec<_>>() - .try_into() - .unwrap(); - - let input = RecursionTipInputWitness { - leaf_layer_parameters, - node_layer_vk_commitment, - branch_circuit_type_set, - queue_set, - }; - - let recursion_tip_witness = RecursionTipInstanceWitness { - input, - vk_witness: node_vk.clone().into_inner(), - proof_witnesses: recursion_tip_proofs.into(), - }; - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - Ok(RecursionTipWitnessGeneratorJob { - block_number: l1_batch_number, - recursion_tip_witness, - node_vk, - }) + Ok(RecursionTipWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + recursion_tip_witness, + node_vk, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs index b20a97641887..77d1da685d0b 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs @@ -8,7 +8,7 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, + artifacts::ArtifactsManager, scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, }; @@ -17,6 +17,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { type InputMetadata = u32; type InputArtifacts = FriProofWrapper; type OutputArtifacts = SchedulerArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -27,11 +28,11 @@ impl ArtifactsManager for SchedulerWitnessGenerator { Ok(artifacts) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 1, @@ -40,29 +41,22 @@ impl ArtifactsManager for SchedulerWitnessGenerator { aggregation_round: AggregationRound::Scheduler, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls:
String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -77,7 +71,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { 0, 0, AggregationRound::Scheduler, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs index fe4f2db4090a..b5745f980917 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs @@ -11,8 +11,10 @@ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, scheduler::{ - prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, + SchedulerWitnessJobMetadata, }, + witness_generator::WitnessGenerator, }; #[async_trait] @@ -44,9 +46,11 @@ impl JobProcessor for SchedulerWitnessGenerator { Ok(Some(( l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, + ::prepare_job( + SchedulerWitnessJobMetadata { + l1_batch_number, + recursion_tip_job_id, + }, &*self.object_store, self.keystore.clone(), ) @@ -72,10 +76,9 @@ impl JobProcessor for SchedulerWitnessGenerator { job: SchedulerWitnessGeneratorJob, started_at: Instant, ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) + let object_store = self.object_store.clone(); + tokio::spawn(async move { + ::process_job(job, object_store, None, started_at).await }) } @@ -92,12 +95,12 @@ impl JobProcessor for SchedulerWitnessGenerator { let blob_save_started_at = Instant::now(); let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; + Self::save_to_bucket(job_id.0, artifacts.clone(), &*self.object_store).await; WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] .observe(blob_save_started_at.elapsed()); - Self::update_database( + Self::save_to_database( &self.prover_connection_pool, job_id.0, started_at, diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs index 10230b35c4f6..7af3d68d5a75 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs +++ b/prover/crates/bin/witness_generator/src/scheduler/mod.rs @@ -1,6 +1,7 @@ use std::{convert::TryInto, sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use zkevm_test_harness::zkevm_circuits::recursion::{ leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, }; @@ -29,7 +30,7 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::SchedulerPartialInputWrapper, + utils::SchedulerPartialInputWrapper, witness_generator::WitnessGenerator, }; mod artifacts; @@ -54,6 +55,11 @@ pub struct SchedulerWitnessGeneratorJob { [RecursionLeafParametersWitness; NUM_BASE_LAYER_CIRCUITS], } +pub struct SchedulerWitnessJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub 
recursion_tip_job_id: u32, +} + #[derive(Debug)] pub struct SchedulerWitnessGenerator { config: FriWitnessGeneratorConfig, @@ -79,15 +85,24 @@ impl SchedulerWitnessGenerator { keystore, } } +} + +#[async_trait] +impl WitnessGenerator for SchedulerWitnessGenerator { + type Job = SchedulerWitnessGeneratorJob; + type Metadata = SchedulerWitnessJobMetadata; + type Artifacts = SchedulerArtifacts; #[tracing::instrument( skip_all, fields(l1_batch = %job.block_number) )] - pub fn process_job_sync( + async fn process_job( job: SchedulerWitnessGeneratorJob, + _object_store: Arc, + _max_circuits_in_flight: Option, started_at: Instant, - ) -> SchedulerArtifacts { + ) -> anyhow::Result { tracing::info!( "Starting fri witness generation of type {:?} for block {}", AggregationRound::Scheduler, @@ -118,66 +133,67 @@ impl SchedulerWitnessGenerator { started_at.elapsed() ); - SchedulerArtifacts { + Ok(SchedulerArtifacts { scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit), - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - recursion_tip_job_id: u32, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let wrapper = - SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?; - let recursion_tip_proof = match wrapper { - FriProofWrapper::Base(_) => Err(anyhow::anyhow!( - "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base" - )), - FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), - }?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let SchedulerPartialInputWrapper(mut scheduler_witness) = - object_store.get(l1_batch_number).await?; - - let recursion_tip_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, - ) - .context("get_recursion_tip_vk()")?; - scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); - - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - let leaf_layer_parameters = leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::>() - .try_into() - .unwrap(); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - Ok(SchedulerWitnessGeneratorJob { - block_number: l1_batch_number, - scheduler_witness, - node_vk, - leaf_layer_parameters, - recursion_tip_vk, - }) + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: SchedulerWitnessJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result { + let started_at = Instant::now(); + let wrapper = + SchedulerWitnessGenerator::get_artifacts(&metadata.recursion_tip_job_id, object_store) + .await?; + let recursion_tip_proof = match wrapper { + FriProofWrapper::Base(_) => Err(anyhow::anyhow!( + "Expected only recursive proofs for scheduler l1 batch {}, got Base", + metadata.l1_batch_number + )), + FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), + }?; + 
WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let SchedulerPartialInputWrapper(mut scheduler_witness) = + object_store.get(metadata.l1_batch_number).await?; + + let recursion_tip_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, + ) + .context("get_recursion_tip_vk()")?; + scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); + + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + let leaf_layer_parameters = leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::>() + .try_into() + .unwrap(); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + Ok(SchedulerWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + scheduler_witness, + node_vk, + leaf_layer_parameters, + recursion_tip_vk, + }) + } } diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index 3ea2b539773f..8524bdae9ff0 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -204,28 +204,6 @@ pub async fn save_recursive_layer_prover_input_artifacts( ids_and_urls } -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -pub async fn save_node_aggregations_artifacts( - block_number: L1BatchNumber, - circuit_id: u8, - depth: u16, - aggregations: Vec<(u64, RecursionQueueSimulator)>, - object_store: &dyn ObjectStore, -) -> String { - let key = AggregationsKey { - block_number, - circuit_id, - depth, - }; - object_store - .put(key, &AggregationWrapper(aggregations)) - .await - .unwrap() -} - #[tracing::instrument(skip_all)] pub async fn load_proofs_for_job_ids( job_ids: &[u32], diff --git a/prover/crates/bin/witness_generator/src/witness_generator.rs b/prover/crates/bin/witness_generator/src/witness_generator.rs new file mode 100644 index 000000000000..eb9200d7950d --- /dev/null +++ b/prover/crates/bin/witness_generator/src/witness_generator.rs @@ -0,0 +1,25 @@ +use std::{sync::Arc, time::Instant}; + +use async_trait::async_trait; +use zksync_object_store::ObjectStore; +use zksync_prover_keystore::keystore::Keystore; + +#[async_trait] +pub trait WitnessGenerator { + type Job: Send + 'static; + type Metadata; + type Artifacts; + + async fn process_job( + job: Self::Job, + object_store: Arc, + max_circuits_in_flight: Option, + started_at: Instant, + ) -> anyhow::Result; + + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result; +} diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index 3323e3c681e4..379ddc3a4eb4 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -15,9 +15,9 @@ use zksync_types::{ L1BatchNumber, }; use zksync_witness_generator::{ - leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator}, - node_aggregation::{self, NodeAggregationWitnessGenerator}, - utils::AggregationWrapper, + 
leaf_aggregation::LeafAggregationWitnessGenerator,
+    node_aggregation::NodeAggregationWitnessGenerator, utils::AggregationWrapper,
+    witness_generator::WitnessGenerator,
 };

 fn compare_serialized<T: Serialize>(expected: &T, actual: &T) {
@@ -52,17 +52,22 @@ async fn test_leaf_witness_gen() {
         .unwrap();

     let keystore = Keystore::locate();
-    let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore)
-        .await
-        .unwrap();
+    let job = LeafAggregationWitnessGenerator::prepare_job(
+        leaf_aggregation_job_metadata,
+        &*object_store,
+        keystore,
+    )
+    .await
+    .unwrap();

-    let artifacts = LeafAggregationWitnessGenerator::process_job_impl(
+    let artifacts = LeafAggregationWitnessGenerator::process_job(
         job,
-        Instant::now(),
         object_store.clone(),
-        500,
+        Some(500),
+        Instant::now(),
     )
-    .await;
+    .await
+    .unwrap();

     let aggregations = AggregationWrapper(artifacts.aggregations);
@@ -142,18 +147,23 @@ async fn test_node_witness_gen() {
     };

     let keystore = Keystore::locate();
-    let job =
-        node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore)
-            .await
-            .unwrap();
+    let job = NodeAggregationWitnessGenerator::prepare_job(
+        node_aggregation_job_metadata,
+        &*object_store,
+        keystore,
+    )
+    .await
+    .unwrap();

-    let artifacts = NodeAggregationWitnessGenerator::process_job_impl(
+    let artifacts = NodeAggregationWitnessGenerator::process_job(
         job,
-        Instant::now(),
         object_store.clone(),
-        500,
+        Some(500),
+        Instant::now(),
     )
-    .await;
+    .await
+    .unwrap();
+
     let aggregations = AggregationWrapper(artifacts.next_aggregations);

     let expected_results_object_store_config = ObjectStoreConfig {

From abe0440811ae4daf4a0f307922a282e9664308e0 Mon Sep 17 00:00:00 2001
From: Patrick
Date: Thu, 19 Sep 2024 11:22:48 +0200
Subject: [PATCH 099/116] feat(tee): use hex serialization for RPC responses (#2887)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Following Anton's suggestion, we have switched to hex serialization for API/RPC requests and responses. Previously, we used default JSON serialization for `Vec<u8>`, which resulted in a lengthy comma-separated list of integers. This change standardizes serialization, making it more efficient and reducing the size of the responses. The previous format, with a series of comma-separated integers for pubkey-like fields, looked odd.

Then:
```
curl -X POST \
     -H "Content-Type: application/json" \
     --data '{"jsonrpc": "2.0", "id": 1, "method": "unstable_getTeeProofs", "params": [491882, "sgx"] }' \
     https://mainnet.era.zksync.io
{"jsonrpc":"2.0","result":[{"attestation":[3,0,2,0,0,0,0,0,10,
```

Now:
```
$ curl -X POST \
     -H "Content-Type: application/json" \
     --data '{"jsonrpc": "2.0", "id": 1, "method": "unstable_getTeeProofs", "params": [1, "sgx"] }' \
     http://localhost:3050
{"jsonrpc":"2.0","result":[{"l1BatchNumber":1,"teeType":"sgx","pubkey":"0506070809","signature":"0001020304","proof":"0a0b0c0d0e","provedAt":"2024-09-16T11:53:38.253033Z","attestation":"0403020100"}],"id":1}
```

This change needs to be deployed in lockstep with: https://github.com/matter-labs/teepot/pull/209.

## Why ❔

To improve user experience (UX) and optimize the RPC response size.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
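For reference, the serialization change boils down to `serde_with`'s `Hex` adapter. A minimal, self-contained sketch of the pattern (assuming the `serde`, `serde_json`, and `serde_with` crates with the `hex` feature enabled; the struct and field names here are illustrative, not the exact production types):

```
use serde::{Deserialize, Serialize};
use serde_with::{hex::Hex, serde_as};

// `#[serde_as]` must sit above `#[derive(...)]` so the `#[serde_as(as = ...)]`
// field attributes expand into plain serde attributes before derivation.
#[serde_as]
#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct TeeProofSketch {
    // Bytes serialize as a hex string ("0001020304") instead of [0, 1, 2, 3, 4].
    #[serde_as(as = "Hex")]
    signature: Vec<u8>,
    // Optional byte fields wrap the adapter; `None` still serializes as `null`.
    #[serde_as(as = "Option<Hex>")]
    attestation: Option<Vec<u8>>,
}

fn main() {
    let proof = TeeProofSketch {
        signature: vec![0, 1, 2, 3, 4],
        attestation: Some(vec![4, 3, 2, 1, 0]),
    };
    let json = serde_json::to_string(&proof).unwrap();
    assert_eq!(json, r#"{"signature":"0001020304","attestation":"0403020100"}"#);

    // Hex input is accepted case-insensitively on the way back in
    // (the test fixtures in this patch use upper-case hex).
    let round_trip: TeeProofSketch = serde_json::from_str(&json).unwrap();
    assert_eq!(round_trip, proof);
}
```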
Co-authored-by: Artem Makhortov <13339874+artmakh@users.noreply.github.com>
---
 Cargo.lock                                 |  1 +
 core/lib/prover_interface/Cargo.toml       |  2 +-
 core/lib/prover_interface/src/api.rs       |  4 ++
 core/lib/prover_interface/src/outputs.rs   |  5 ++
 .../tests/job_serialization.rs             |  6 +-
 core/lib/types/Cargo.toml                  |  1 +
 core/lib/types/src/api/mod.rs              |  6 ++
 core/node/api_server/src/web3/testonly.rs  |  2 +-
 core/node/api_server/src/web3/tests/mod.rs |  1 +
 .../api_server/src/web3/tests/unstable.rs  | 69 +++++++++++++++++++
 core/node/proof_data_handler/src/tests.rs  |  6 +-
 prover/Cargo.lock                          |  1 +
 12 files changed, 96 insertions(+), 8 deletions(-)
 create mode 100644 core/node/api_server/src/web3/tests/unstable.rs

diff --git a/Cargo.lock b/Cargo.lock
index 2aaf875a2f49..90849bc72b37 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10898,6 +10898,7 @@ dependencies = [
  "secp256k1",
  "serde",
  "serde_json",
+ "serde_with",
  "strum",
  "thiserror",
  "tokio",
diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml
index 8c73c2c6ac38..889b80b4fbee 100644
--- a/core/lib/prover_interface/Cargo.toml
+++ b/core/lib/prover_interface/Cargo.toml
@@ -20,7 +20,7 @@ circuit_sequencer_api_1_5_0.workspace = true
 serde.workspace = true
 strum = { workspace = true, features = ["derive"] }
-serde_with = { workspace = true, features = ["base64"] }
+serde_with = { workspace = true, features = ["base64", "hex"] }
 chrono = { workspace = true, features = ["serde"] }

 [dev-dependencies]
diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs
index bc95345bbbaa..776cd3141cbe 100644
--- a/core/lib/prover_interface/src/api.rs
+++ b/core/lib/prover_interface/src/api.rs
@@ -2,6 +2,7 @@
 //! This module defines the types used in the API.

 use serde::{Deserialize, Serialize};
+use serde_with::{hex::Hex, serde_as};
 use zksync_types::{
     protocol_version::{L1VerifierConfig, ProtocolSemanticVersion},
     tee_types::TeeType,
@@ -71,8 +72,11 @@ pub struct VerifyProofRequest(pub Box<L1BatchProofForL1>);
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 pub struct SubmitTeeProofRequest(pub Box<L1BatchTeeProofForL1>);

+#[serde_as]
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 pub struct RegisterTeeAttestationRequest {
+    #[serde_as(as = "Hex")]
     pub attestation: Vec<u8>,
+    #[serde_as(as = "Hex")]
     pub pubkey: Vec<u8>,
 }
diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs
index 9672bfb2142b..60a9eaba760b 100644
--- a/core/lib/prover_interface/src/outputs.rs
+++ b/core/lib/prover_interface/src/outputs.rs
@@ -2,6 +2,7 @@ use core::fmt;

 use circuit_sequencer_api_1_5_0::proof::FinalProof;
 use serde::{Deserialize, Serialize};
+use serde_with::{hex::Hex, serde_as};
 use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
 use zksync_types::{protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber};

@@ -14,14 +15,18 @@ pub struct L1BatchProofForL1 {
 }

 /// A "final" TEE proof that can be sent to the L1 contract.
+#[serde_as]
 #[derive(Clone, PartialEq, Serialize, Deserialize)]
 pub struct L1BatchTeeProofForL1 {
     // signature generated within the TEE enclave, using the privkey corresponding to the pubkey
+    #[serde_as(as = "Hex")]
     pub signature: Vec<u8>,
     // pubkey used for signature verification; each key pair is attested by the TEE attestation
     // stored in the db
+    #[serde_as(as = "Hex")]
     pub pubkey: Vec<u8>,
     // data that was signed
+    #[serde_as(as = "Hex")]
     pub proof: Vec<u8>,
     // type of TEE used for attestation
     pub tee_type: TeeType,
diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs
index a2aee0c2733e..ead59749abe3 100644
--- a/core/lib/prover_interface/tests/job_serialization.rs
+++ b/core/lib/prover_interface/tests/job_serialization.rs
@@ -167,9 +167,9 @@ fn test_proof_request_serialization() {
 #[test]
 fn test_tee_proof_request_serialization() {
     let tee_proof_str = r#"{
-        "signature": [ 0, 1, 2, 3, 4 ],
-        "pubkey": [ 5, 6, 7, 8, 9 ],
-        "proof": [ 10, 11, 12, 13, 14 ],
+        "signature": "0001020304",
+        "pubkey": "0506070809",
+        "proof": "0A0B0C0D0E",
         "tee_type": "sgx"
     }"#;
     let tee_proof_result = serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_str).unwrap();
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index 55cbef761ad5..54c38384a7ad 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -28,6 +28,7 @@ once_cell.workspace = true
 rlp.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+serde_with = { workspace = true, features = ["hex"] }
 bigdecimal.workspace = true
 strum = { workspace = true, features = ["derive"] }
 thiserror.workspace = true
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index 916fae6a35bc..f648204ca557 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -1,6 +1,7 @@
 use chrono::{DateTime, Utc};
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 use serde_json::Value;
+use serde_with::{hex::Hex, serde_as};
 use strum::Display;
 use zksync_basic_types::{
     tee_types::TeeType,
@@ -784,15 +785,20 @@ pub struct Proof {
     pub storage_proof: Vec<StorageProof>,
 }

+#[serde_as]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct TeeProof {
     pub l1_batch_number: L1BatchNumber,
     pub tee_type: Option<TeeType>,
+    #[serde_as(as = "Option<Hex>")]
     pub pubkey: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub signature: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub proof: Option<Vec<u8>>,
     pub proved_at: DateTime<Utc>,
+    #[serde_as(as = "Option<Hex>")]
     pub attestation: Option<Vec<u8>>,
 }
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs
index a77498d4341d..18ee3a641d0a 100644
--- a/core/node/api_server/src/web3/testonly.rs
+++ b/core/node/api_server/src/web3/testonly.rs
@@ -182,7 +182,7 @@ async fn spawn_server(
     let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel();

     let mut namespaces = Namespace::DEFAULT.to_vec();
-    namespaces.extend([Namespace::Debug, Namespace::Snapshots]);
+    namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]);

     let server_builder = match transport {
         ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0),
diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs
index 635620e9c525..fe90f1483a5a 100644
--- a/core/node/api_server/src/web3/tests/mod.rs
+++ b/core/node/api_server/src/web3/tests/mod.rs
@@ -63,6 +63,7 @@ use crate::web3::testonly::{spawn_http_server, 
spawn_ws_server}; mod debug; mod filters; mod snapshots; +mod unstable; mod vm; mod ws; diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs new file mode 100644 index 000000000000..1d425f8b9515 --- /dev/null +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -0,0 +1,69 @@ +//! Tests for the `unstable` Web3 namespace. + +use zksync_types::tee_types::TeeType; +use zksync_web3_decl::namespaces::UnstableNamespaceClient; + +use super::*; + +#[derive(Debug)] +struct GetTeeProofsTest {} + +impl GetTeeProofsTest { + fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl HttpTest for GetTeeProofsTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let batch_no = L1BatchNumber(1337); + let tee_type = TeeType::Sgx; + let proof = client.tee_proofs(batch_no, Some(tee_type)).await?; + + assert!(proof.is_empty()); + + let mut storage = pool.connection().await.unwrap(); + storage + .tee_verifier_input_producer_dal() + .create_tee_verifier_input_producer_job(batch_no) + .await?; + + let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; + let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); + tee_proof_generation_dal + .save_attestation(&pubkey, &attestation) + .await?; + tee_proof_generation_dal + .insert_tee_proof_generation_job(batch_no, tee_type) + .await?; + + let signature = vec![0, 1, 2, 3, 4]; + let proof_vec = vec![5, 6, 7, 8, 9]; + tee_proof_generation_dal + .save_proof_artifacts_metadata(batch_no, tee_type, &pubkey, &signature, &proof_vec) + .await?; + + let proofs = client.tee_proofs(batch_no, Some(tee_type)).await?; + assert!(proofs.len() == 1); + let proof = &proofs[0]; + assert!(proof.l1_batch_number == batch_no); + assert!(proof.tee_type == Some(tee_type)); + assert!(proof.pubkey.as_ref() == Some(&pubkey)); + assert!(proof.signature.as_ref() == Some(&signature)); + assert!(proof.proof.as_ref() == Some(&proof_vec)); + assert!(proof.attestation.as_ref() == Some(&attestation)); + + Ok(()) + } +} + +#[tokio::test] +async fn get_tee_proofs() { + test_http_server(GetTeeProofsTest::new()).await; +} diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 6ab7e4dec436..86cc53234486 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -131,9 +131,9 @@ async fn submit_tee_proof() { // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof let tee_proof_request_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_request = diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d29f0110f217..1dbadf75d9cb 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8002,6 +8002,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", From 55f1605bf6548809d6fff5256e597561b647a52e Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 19 Sep 2024 16:31:49 +0700 Subject: [PATCH 100/116] feat(ci): Add runtime-base image (#2919) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Adding image which will be used as runtime-base for services ## Why ❔ As a workaround for dockerhub ratelimits ## Checklist 
- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-runtime-base.yml | 68 ++++++++++++++++++++++++ docker/runtime-base/Dockerfile | 9 ++++ 2 files changed, 77 insertions(+) create mode 100644 .github/workflows/build-runtime-base.yml create mode 100644 docker/runtime-base/Dockerfile diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml new file mode 100644 index 000000000000..0767fce93868 --- /dev/null +++ b/.github/workflows/build-runtime-base.yml @@ -0,0 +1,68 @@ +name: Build base Docker image +on: + workflow_dispatch: + inputs: + repo_ref: + description: "git reference of the zksync-era to build" + required: true + default: main +jobs: + build-images: + name: Build and Push Docker Images + runs-on: matterlabs-ci-runner-high-performance + outputs: + image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }} + # Needed to push to Gihub Package Registry + permissions: + packages: write + contents: read + env: + REPO_REF: ${{ github.event.inputs.repo_ref }} + strategy: + matrix: + name: [ runtime-base ] + image_name: [ zksync-runtime-base ] + + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + with: + submodules: "recursive" + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Get tag + id: get-sha + run: | + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Set up QEMU + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: . 
+ platforms: | + - linux/arm64 + - linux/amd64 + file: docker/${{ matrix.name }}/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.image_name }}:latest + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest diff --git a/docker/runtime-base/Dockerfile b/docker/runtime-base/Dockerfile new file mode 100644 index 000000000000..09d920b1c436 --- /dev/null +++ b/docker/runtime-base/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y \ + curl \ + libpq5 \ + ca-certificates \ + && \ + rm -rf /var/lib/apt/lists/* From 6acd606107b7b02e5921e8648a92ba4c408c6040 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:00:54 +0700 Subject: [PATCH 101/116] fix(ci): Rename workflows for runner and builder base (#2921) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Rename workflows for runner and builder base ## Why ❔ Easier to find =) ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-base.yml | 2 +- .github/workflows/build-runtime-base.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml index d8e557225620..83be44c126f9 100644 --- a/.github/workflows/build-base.yml +++ b/.github/workflows/build-base.yml @@ -1,4 +1,4 @@ -name: Build base Docker image +name: Build zksync-build-base Docker image on: workflow_dispatch: inputs: diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml index 0767fce93868..1c4d69727715 100644 --- a/.github/workflows/build-runtime-base.yml +++ b/.github/workflows/build-runtime-base.yml @@ -1,4 +1,4 @@ -name: Build base Docker image +name: Build zksync-runtime-base Docker image on: workflow_dispatch: inputs: From eda70b4e534bdc56192e82dc939a038c23b85110 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:10:27 +0700 Subject: [PATCH 102/116] fix(ci): zksync-runtime-base proper platforms names (#2923) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Proper platform names for docker build action ## Why ❔ Docker documentation (which is linked in docker-build-action docs) [points to non-working reference](https://docs.docker.com/reference/cli/docker/buildx/build/#platform) ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-runtime-base.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml index 1c4d69727715..8545f9fd1023 100644 --- a/.github/workflows/build-runtime-base.yml +++ b/.github/workflows/build-runtime-base.yml @@ -57,8 +57,8 @@ jobs: push: true context: . 
platforms: | - - linux/arm64 - - linux/amd64 + - arm64 + - amd64 file: docker/${{ matrix.name }}/Dockerfile labels: | org.opencontainers.image.source=https://github.com/matter-labs/zksync-era From 1b06409e4672696605262127ecd0c0f1798f16c2 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:44:46 +0700 Subject: [PATCH 103/116] fix(ci): Proper type of var for platforms in docker build action (#2924) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Proper type of var for platforms in docker build action ## Why ❔ ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- .github/workflows/build-runtime-base.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml index 8545f9fd1023..eaec05bc6bcf 100644 --- a/.github/workflows/build-runtime-base.yml +++ b/.github/workflows/build-runtime-base.yml @@ -56,9 +56,7 @@ jobs: with: push: true context: . - platforms: | - - arm64 - - amd64 + platforms: arm64, amd64 file: docker/${{ matrix.name }}/Dockerfile labels: | org.opencontainers.image.source=https://github.com/matter-labs/zksync-era From 174dcf5d9f36b8baae001fadb18e8cd8b5161736 Mon Sep 17 00:00:00 2001 From: Artem Makhortov <13339874+artmakh@users.noreply.github.com> Date: Thu, 19 Sep 2024 18:17:03 +0700 Subject: [PATCH 104/116] feat(ci): Migrate to vendored image as a zksync-runtime-base (#2926) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Move building images to zksync-runtime-base ## Why ❔ Workaround for dockerhub rate limits ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --- docker/contract-verifier/Dockerfile | 4 ++-- docker/external-node/Dockerfile | 4 +--- docker/prover-fri-gateway/Dockerfile | 3 +-- docker/prover-job-monitor/Dockerfile | 3 +-- docker/server-v2/Dockerfile | 4 ++-- docker/snapshots-creator/Dockerfile | 4 ++-- docker/verified-sources-fetcher/Dockerfile | 5 +++-- docker/witness-generator/Dockerfile | 4 +--- docker/witness-vector-generator/Dockerfile | 4 +--- 9 files changed, 14 insertions(+), 21 deletions(-) diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index b8d77163f141..7fcc695bf70b 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -19,9 +19,9 @@ COPY . . RUN cargo build --release -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 jq && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y wget python3 jq && rm -rf /var/lib/apt/lists/* # install zksolc 1.3.x RUN skip_versions="v1.3.12 v1.3.15 v1.3.20" && \ diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index a573a76511a2..79b79cbc5f68 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -17,9 +17,7 @@ WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index a8d389df2839..3e631b35156e 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -18,8 +18,7 @@ COPY . . RUN cd prover && cargo build --release --bin zksync_prover_fri_gateway -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy VK required for proof wrapping COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index b7255f5df433..88b46df27ffa 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -18,8 +18,7 @@ COPY . . RUN cd prover && cargo build --release --bin zksync_prover_job_monitor -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index d5b3ef2a5e61..460ac70c622c 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -19,9 +19,9 @@ COPY . . RUN cargo build --release --features=rocksdb/io-uring -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* EXPOSE 3000 diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 044599bcc920..2d3c83064981 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -17,9 +17,9 @@ COPY . . RUN cargo build --release --bin snapshots_creator -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/snapshots_creator /usr/bin diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index f7a08eba587d..87475f3187f3 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -18,8 +18,9 @@ COPY . . 
RUN cargo build --release --bin verified_sources_fetcher -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y apt-transport-https ca-certificates gnupg curl git && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +RUN apt-get update && apt-get install -y apt-transport-https gnupg git && rm -rf /var/lib/apt/lists/* RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index e8f017c4971d..9211c3e23e53 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -20,9 +20,7 @@ COPY . . RUN cd prover && cargo build --release --bin zksync_witness_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index dd04de7b7852..93d8dd308a58 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -18,9 +18,7 @@ COPY . . RUN cd prover && cargo build --release --bin zksync_witness_vector_generator -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy finalization hints required for witness vector generation COPY prover/data/keys/ /prover/data/keys/ From f6d68da33a487a2ae1953a28fac3a2b24ac200bb Mon Sep 17 00:00:00 2001 From: zksync-era-bot <147085853+zksync-era-bot@users.noreply.github.com> Date: Thu, 19 Sep 2024 14:52:15 +0400 Subject: [PATCH 105/116] chore(main): release core 24.25.0 (#2816) :robot: I have created a release *beep* *boop* --- ## [24.25.0](https://github.com/matter-labs/zksync-era/compare/core-v24.24.0...core-v24.25.0) (2024-09-19) ### Features * (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) * add da clients ([#2743](https://github.com/matter-labs/zksync-era/issues/2743)) ([9218612](https://github.com/matter-labs/zksync-era/commit/9218612fdb2b63c20841e2e2e5a45bbd23c01fbc)) * attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) * emit errors in prover API metrics ([#2890](https://github.com/matter-labs/zksync-era/issues/2890)) ([2ac7cc5](https://github.com/matter-labs/zksync-era/commit/2ac7cc5836e69fc82c98df2005fedee01c1084e1)) * **en:** Resume incomplete snapshot in snapshot creator in more cases ([#2886](https://github.com/matter-labs/zksync-era/issues/2886)) ([f095b4a](https://github.com/matter-labs/zksync-era/commit/f095b4a3223222ac712de53592fe1e68f766600f)) * make `to` address optional for transaction data ([#2852](https://github.com/matter-labs/zksync-era/issues/2852)) ([8363c1d](https://github.com/matter-labs/zksync-era/commit/8363c1d8697ad9bd2fe5d326218476bc3dad38af)) 
* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2))
* Selector generator tool ([#2844](https://github.com/matter-labs/zksync-era/issues/2844)) ([b359b08](https://github.com/matter-labs/zksync-era/commit/b359b085895da6582f1d28722107bc5b25f1232c))
* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0))
* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9))
* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f))

### Bug Fixes

* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7))
* **en:** Fix connection starvation during snapshot recovery ([#2836](https://github.com/matter-labs/zksync-era/issues/2836)) ([52f4f76](https://github.com/matter-labs/zksync-era/commit/52f4f763674d25f8a5e7f3a111354a559f798d52))
* **eth_watch:** fix `get_events_inner` ([#2882](https://github.com/matter-labs/zksync-era/issues/2882)) ([c957dd8](https://github.com/matter-labs/zksync-era/commit/c957dd8011213e0e95fa5962e2310321b29a0d16))
* handling of HTTP 403 thrown by proxyd ([#2835](https://github.com/matter-labs/zksync-era/issues/2835)) ([2d71c74](https://github.com/matter-labs/zksync-era/commit/2d71c7408a0eed3662fc51f70fa9f525d66e4c6f))
* **state-keeper:** Restore processed tx metrics in state keeper ([#2815](https://github.com/matter-labs/zksync-era/issues/2815)) ([4d8862b](https://github.com/matter-labs/zksync-era/commit/4d8862b76a55ac78edd481694fefd2107736ffd9))
* **tee-prover:** fix deserialization of `std::time::Duration` in `envy` config ([#2817](https://github.com/matter-labs/zksync-era/issues/2817)) ([df8641a](https://github.com/matter-labs/zksync-era/commit/df8641a912a8d480ceecff58b0bfaef05e04f0c8))

---

This PR was generated with [Release Please](https://github.com/googleapis/release-please). See [documentation](https://github.com/googleapis/release-please#release-please).
--------- Co-authored-by: zksync-era-bot --- .github/release-please/manifest.json | 2 +- Cargo.lock | 2 +- core/CHANGELOG.md | 27 +++++++++++++++++++++++++++ core/bin/external_node/Cargo.toml | 2 +- 4 files changed, 30 insertions(+), 3 deletions(-) diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 811c773b6f54..9bae11c7b26a 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.24.0", + "core": "24.25.0", "prover": "16.5.0", "zk_toolbox": "0.1.2" } diff --git a/Cargo.lock b/Cargo.lock index 90849bc72b37..7d57107bff4e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9855,7 +9855,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.24.0" +version = "24.25.0" dependencies = [ "anyhow", "assert_matches", diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 7d4381b09bef..f07928071035 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,32 @@ # Changelog +## [24.25.0](https://github.com/matter-labs/zksync-era/compare/core-v24.24.0...core-v24.25.0) (2024-09-19) + + +### Features + +* (DB migration) Rename recursion_scheduler_level_vk_hash to snark_wrapper_vk_hash ([#2809](https://github.com/matter-labs/zksync-era/issues/2809)) ([64f9551](https://github.com/matter-labs/zksync-era/commit/64f95514c99f95da2a19a97ff064c29a97efc22f)) +* add da clients ([#2743](https://github.com/matter-labs/zksync-era/issues/2743)) ([9218612](https://github.com/matter-labs/zksync-era/commit/9218612fdb2b63c20841e2e2e5a45bbd23c01fbc)) +* attester committees data extractor (BFT-434) ([#2684](https://github.com/matter-labs/zksync-era/issues/2684)) ([92dde03](https://github.com/matter-labs/zksync-era/commit/92dde039ee8a0bc08e2019b7fa6f243a34d9816f)) +* emit errors in prover API metrics ([#2890](https://github.com/matter-labs/zksync-era/issues/2890)) ([2ac7cc5](https://github.com/matter-labs/zksync-era/commit/2ac7cc5836e69fc82c98df2005fedee01c1084e1)) +* **en:** Resume incomplete snapshot in snapshot creator in more cases ([#2886](https://github.com/matter-labs/zksync-era/issues/2886)) ([f095b4a](https://github.com/matter-labs/zksync-era/commit/f095b4a3223222ac712de53592fe1e68f766600f)) +* make `to` address optional for transaction data ([#2852](https://github.com/matter-labs/zksync-era/issues/2852)) ([8363c1d](https://github.com/matter-labs/zksync-era/commit/8363c1d8697ad9bd2fe5d326218476bc3dad38af)) +* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) +* Selector generator tool ([#2844](https://github.com/matter-labs/zksync-era/issues/2844)) ([b359b08](https://github.com/matter-labs/zksync-era/commit/b359b085895da6582f1d28722107bc5b25f1232c)) +* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) +* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) + + +### Bug 
Fixes + +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* **en:** Fix connection starvation during snapshot recovery ([#2836](https://github.com/matter-labs/zksync-era/issues/2836)) ([52f4f76](https://github.com/matter-labs/zksync-era/commit/52f4f763674d25f8a5e7f3a111354a559f798d52)) +* **eth_watch:** fix `get_events_inner` ([#2882](https://github.com/matter-labs/zksync-era/issues/2882)) ([c957dd8](https://github.com/matter-labs/zksync-era/commit/c957dd8011213e0e95fa5962e2310321b29a0d16)) +* handling of HTTP 403 thrown by proxyd ([#2835](https://github.com/matter-labs/zksync-era/issues/2835)) ([2d71c74](https://github.com/matter-labs/zksync-era/commit/2d71c7408a0eed3662fc51f70fa9f525d66e4c6f)) +* **state-keeper:** Restore processed tx metrics in state keeper ([#2815](https://github.com/matter-labs/zksync-era/issues/2815)) ([4d8862b](https://github.com/matter-labs/zksync-era/commit/4d8862b76a55ac78edd481694fefd2107736ffd9)) +* **tee-prover:** fix deserialization of `std::time::Duration` in `envy` config ([#2817](https://github.com/matter-labs/zksync-era/issues/2817)) ([df8641a](https://github.com/matter-labs/zksync-era/commit/df8641a912a8d480ceecff58b0bfaef05e04f0c8)) + ## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index a1d3951ff3d8..c7a4476173f8 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.24.0" # x-release-please-version +version = "24.25.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true From ab898e7f1b3d0cd5d31c1820d50ee0fb043c1e6f Mon Sep 17 00:00:00 2001 From: Danil Date: Thu, 19 Sep 2024 13:00:27 +0200 Subject: [PATCH 106/116] fix(zk_toolbox): show readable log (#2911) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ ## Why ❔ ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
--------- Signed-off-by: Danil --- .github/workflows/vm-perf-to-prometheus.yml | 2 +- zk_toolbox/crates/common/src/cmd.rs | 5 +---- zk_toolbox/crates/common/src/term/logger.rs | 7 +++---- 3 files changed, 5 insertions(+), 9 deletions(-) diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 2c82b796d70e..4c8c90a0d8f2 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -34,7 +34,7 @@ jobs: run_retried docker compose pull zk docker compose up -d zk ci_run zkt - ci_run zk_supervisor contracts all + ci_run zk_supervisor contracts - name: run benchmarks run: | diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs index 7bf0147b69c0..130a3b2c100e 100644 --- a/zk_toolbox/crates/common/src/cmd.rs +++ b/zk_toolbox/crates/common/src/cmd.rs @@ -147,10 +147,7 @@ impl<'a> Cmd<'a> { fn check_output_status(command_text: &str, output: &std::process::Output) -> CmdResult<()> { if !output.status.success() { logger::new_line(); - logger::error_note( - &format!("Command failed to run: {}", command_text), - &log_output(output), - ); + logger::error_note("Command failed to run", &log_output(output)); return Err(CmdError { stderr: Some(String::from_utf8(output.stderr.clone())?), source: anyhow::anyhow!("Command failed to run: {}", command_text), diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zk_toolbox/crates/common/src/term/logger.rs index 33a88bd961e2..17e518d9ad92 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zk_toolbox/crates/common/src/term/logger.rs @@ -56,10 +56,9 @@ pub fn note(msg: impl Display, content: impl Display) { } pub fn error_note(msg: &str, content: &str) { - let symbol = CliclackTheme.state_symbol(&ThemeState::Submit); - let note = CliclackTheme - .format_note(msg, content) - .replace(&symbol, &CliclackTheme.error_symbol()); + let note = CliclackTheme.format_log(msg, &CliclackTheme.error_symbol()); + term_write(note); + let note = CliclackTheme.format_log(content, &CliclackTheme.error_symbol()); term_write(note); } From 6ec528ef4c46ba0432683003dcee111e9cbd444e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bruno=20Fran=C3=A7a?= Date: Thu, 19 Sep 2024 12:49:37 +0100 Subject: [PATCH 107/116] docs: Update system requirements (#2894) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - 30 GB of free disk space are no longer sufficient to run a testnet EN. Upped the system requirements to 50 GB. - State growth is normally used to refer to the growth of the state tree, not of the historical state. What we have is historical state pruning, not state pruning. 
--- docs/guides/external-node/00_quick_start.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 3902fdc15560..776e8a56e497 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -65,12 +65,12 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac > [!NOTE] > -> To stop state growth, you can enable state pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, +> To stop historical DB growth, you can enable DB pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, > you can read more about pruning in > [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md) - 32 GB of RAM and a relatively modern CPU -- 30 GB of storage for testnet nodes +- 50 GB of storage for testnet nodes - 300 GB of storage for mainnet nodes - 100 Mbps connection (1 Gbps+ recommended) From 209ac10a949a6c868578d999368951b82b6b3a99 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Thu, 19 Sep 2024 14:54:53 +0300 Subject: [PATCH 108/116] chore(vm): Bump vm2 repo revision (#2883) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Bumps the `vm2` crate revision and updates new VM usage correspondingly. ## Why ❔ To keep the `vm2` dependency up to date and track possible regressions. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. --- Cargo.lock | 4 +- Cargo.toml | 2 +- .../src/versions/vm_fast/circuits_tracer.rs | 8 +- .../multivm/src/versions/vm_fast/events.rs | 11 ++- core/lib/multivm/src/versions/vm_fast/glue.rs | 9 ++- .../src/versions/vm_fast/tests/bootloader.rs | 2 +- .../tests/tester/transaction_test_info.rs | 13 +-- .../src/versions/vm_fast/tests/utils.rs | 2 +- core/lib/multivm/src/versions/vm_fast/vm.rs | 80 +++++-------------- prover/Cargo.lock | 4 +- 10 files changed, 47 insertions(+), 88 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7d57107bff4e..8d062ebb361e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10966,7 +10966,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -10978,7 +10978,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] diff --git a/Cargo.toml b/Cargo.toml index 1e2fb9e0c7aa..5a8a507b0340 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -227,7 +227,7 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = 
"cd6136c42ec56856e0abcf2a98d1a9e120161482" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } # Consensus dependencies. zksync_concurrency = "=0.1.1" diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index de6ead71e655..e6b1a53e9d05 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -17,7 +17,7 @@ pub(crate) struct CircuitsTracer { keccak256_cycles: u32, ecrecover_cycles: u32, sha256_cycles: u32, - secp256k1_verify_cycles: u32, + secp256r1_verify_cycles: u32, transient_storage_checker_cycles: u32, } @@ -115,7 +115,7 @@ impl Tracer for CircuitsTracer { CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles, CycleStats::Sha256(cycles) => self.sha256_cycles += cycles, CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles, - CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles, + CycleStats::Secp256r1Verify(cycles) => self.secp256r1_verify_cycles += cycles, CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles, CycleStats::StorageRead => self.storage_application_cycles += 1, CycleStats::StorageWrite => self.storage_application_cycles += 2, @@ -146,7 +146,7 @@ impl CircuitsTracer { ecrecover: self.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, - secp256k1_verify: self.secp256k1_verify_cycles as f32 + secp256k1_verify: self.secp256r1_verify_cycles as f32 / GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32, transient_storage_checker: self.transient_storage_checker_cycles as f32 / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32, diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 2312c3d97b40..294e8adce32b 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,6 +1,6 @@ use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; -use zksync_vm2::Event; +use zksync_vm2::interface::Event; use crate::interface::VmEvent; @@ -23,18 +23,21 @@ impl EventAccumulator { } } -pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec { +pub(crate) fn merge_events( + events: impl Iterator, + block_number: L1BatchNumber, +) -> Vec { let mut result = vec![]; let mut current: Option<(usize, u32, EventAccumulator)> = None; - for message in events.iter() { + for event in events { let Event { shard_id, is_first, tx_number, key, value, - } = message.clone(); + } = event; if !is_first { if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index f24c82af11e9..c2d38f351c04 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -1,18 +1,19 @@ use 
zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; use zksync_utils::u256_to_h256; +use zksync_vm2::interface; use crate::glue::GlueFrom; -impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log { - fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self { - let zksync_vm2::L2ToL1Log { +impl GlueFrom for SystemL2ToL1Log { + fn glue_from(value: interface::L2ToL1Log) -> Self { + let interface::L2ToL1Log { key, value, is_service, address, shard_id, tx_number, - } = *value; + } = value; Self(L2ToL1Log { shard_id, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 5c1158a5909d..48e1b10de442 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use zksync_types::U256; -use zksync_vm2::HeapId; +use zksync_vm2::interface::HeapId; use crate::{ interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index ce45390260c5..b84af0481d70 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,6 +1,7 @@ use std::fmt; use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_vm2::interface::{Event, StateInterface}; use super::VmTester; use crate::{ @@ -190,7 +191,7 @@ impl TransactionTestInfo { struct VmStateDump { state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[zksync_vm2::Event]>, + events: Box<[Event]>, } impl PartialEq for VmStateDump { @@ -205,14 +206,8 @@ impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.dump_state(), - storage_writes: self - .inner - .world_diff() - .get_storage_state() - .iter() - .map(|(k, v)| (*k, *v)) - .collect(), - events: self.inner.world_diff().events().into(), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), } } } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index d91e13076514..5ab5aa0dec92 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -10,7 +10,7 @@ use zksync_types::{ U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::{HeapId, StateInterface}; +use zksync_vm2::interface::{HeapId, StateInterface}; use crate::interface::storage::ReadStorage; diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 5a73ce49b06c..66ee04c73fd9 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -16,8 +16,8 @@ use zksync_types::{ }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ - decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, - Settings, StateInterface, Tracer, VirtualMachine, + interface::{CallframeInterface, HeapId, StateInterface, Tracer}, + ExecutionEnd, FatPointer, Program, Settings, VirtualMachine, }; use super::{ @@ -79,7 +79,7 @@ impl Vm { operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut 
pubdata_before = self.inner.world_diff().pubdata() as u32; + let mut pubdata_before = self.inner.pubdata() as u32; let result = loop { let hook = match self.inner.run(&mut self.world, tracer) { @@ -125,7 +125,7 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff().pubdata() as u32; + let pubdata_published = self.inner.pubdata() as u32; refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -186,8 +186,7 @@ impl Vm { unreachable!("We do not provide the pubdata when executing the block tip or a single transaction"); } - let events = - merge_events(self.inner.world_diff().events(), self.batch_env.number); + let events = merge_events(self.inner.events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -421,7 +420,7 @@ impl Vm { BOOTLOADER_ADDRESS, bootloader, H160::zero(), - vec![], + &[], system_env.bootloader_gas_limit, Settings { default_aa_code_hash, @@ -461,7 +460,8 @@ impl Vm { // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let world_diff = self.inner.world_diff(); - let events = merge_events(world_diff.events(), self.batch_env.number); + let vm = &self.inner; + let events = merge_events(vm.events(), self.batch_env.number); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) .into_iter() @@ -480,22 +480,12 @@ impl Vm { }) .collect(), used_contract_hashes: self.decommitted_hashes().collect(), - system_logs: world_diff - .l2_to_l1_logs() - .iter() - .map(|x| x.glue_into()) - .collect(), + system_logs: vm.l2_to_l1_logs().map(GlueInto::glue_into).collect(), user_l2_to_l1_logs, storage_refunds: world_diff.storage_refunds().to_vec(), pubdata_costs: world_diff.pubdata_costs().to_vec(), } } - - fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && !self.has_previous_far_calls() { - self.inner.delete_history(); - } - } } impl VmInterface for Vm { @@ -519,7 +509,7 @@ impl VmInterface for Vm { let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.world_diff().pubdata(); + let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); @@ -549,7 +539,7 @@ impl VmInterface for Vm { }) .collect(); let events = merge_events( - self.inner.world_diff().events_after(&start), + self.inner.world_diff().events_after(&start).iter().copied(), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -562,7 +552,7 @@ impl VmInterface for Vm { .world_diff() .l2_to_l1_logs_after(&start) .iter() - .map(|x| x.glue_into()) + .map(|&log| log.glue_into()) .collect(); VmExecutionLogs { storage_logs, @@ -573,7 +563,7 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff().pubdata(); + let pubdata_after = self.inner.pubdata(); let circuit_statistic = tracer.circuit_statistic(); let gas_remaining = self.gas_remaining(); VmExecutionResultAndLogs { @@ -648,7 +638,6 @@ impl VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, gas_for_account_validation: u32, } @@ -660,9 +649,8 @@ impl VmInterfaceHistoryEnabled for Vm { "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" ); - self.delete_history_if_appropriate(); + self.inner.make_snapshot(); self.snapshot = Some(VmSnapshot { - vm_snapshot: self.inner.snapshot(), 
bootloader_snapshot: self.bootloader_state.get_snapshot(), gas_for_account_validation: self.gas_for_account_validation, }); @@ -670,21 +658,18 @@ impl VmInterfaceHistoryEnabled for Vm { fn rollback_to_the_latest_snapshot(&mut self) { let VmSnapshot { - vm_snapshot, bootloader_snapshot, gas_for_account_validation, } = self.snapshot.take().expect("no snapshots to rollback to"); - self.inner.rollback(vm_snapshot); + self.inner.rollback(); self.bootloader_state.apply_snapshot(bootloader_snapshot); self.gas_for_account_validation = gas_for_account_validation; - - self.delete_history_if_appropriate(); } fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot(); self.snapshot = None; - self.delete_history_if_appropriate(); } } @@ -721,39 +706,13 @@ impl World { } } - fn bytecode_to_program(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) - } - fn convert_system_contract_code( code: &SystemContractCode, is_bootloader: bool, ) -> (U256, Program) { ( h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), + Program::from_words(code.code.clone(), is_bootloader), ) } } @@ -808,11 +767,12 @@ impl zksync_vm2::World for World { self.program_cache .entry(hash) .or_insert_with(|| { - Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + let bytecode = self.bytecode_cache.entry(hash).or_insert_with(|| { self.storage .load_factory_dep(u256_to_h256(hash)) .expect("vm tried to decommit nonexistent bytecode") - })) + }); + Program::new(bytecode, false) }) .clone() } diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 1dbadf75d9cb..9a6c4b424232 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -8086,7 +8086,7 @@ dependencies = [ [[package]] name = "zksync_vm2" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "enum_dispatch", "primitive-types", @@ -8098,7 +8098,7 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" dependencies = [ "primitive-types", ] From 633bca453c21bb9f26b9b95a77366128fd199213 Mon Sep 17 00:00:00 2001 From: Harald Hoyer Date: Thu, 19 Sep 2024 14:58:23 +0200 Subject: [PATCH 109/116] feat(nix): use precompiled `rocksdb` and `snappy` (#2925) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Use precompiled `rocksdb` and `snappy` in the nix build. ## Why ❔ Speeds up compilation and is usually better maintained. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. 
Signed-off-by: Harald Hoyer --- flake.nix | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/flake.nix b/flake.nix index cc14faebfed5..ef618816f9c9 100644 --- a/flake.nix +++ b/flake.nix @@ -67,7 +67,6 @@ }; craneLib = (crane.mkLib pkgs).overrideToolchain rustVersion; - NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; commonArgs = { nativeBuildInputs = with pkgs;[ @@ -81,6 +80,8 @@ snappy.dev lz4.dev bzip2.dev + rocksdb + snappy.dev ]; src = with pkgs.lib.fileset; toSource { @@ -97,7 +98,9 @@ env = { OPENSSL_NO_VENDOR = "1"; - inherit NIX_OUTPATH_USED_AS_RANDOM_SEED; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb.out}/lib"; + SNAPPY_LIB_DIR = "${pkgs.snappy.out}/lib"; + NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; }; doCheck = false; From 2cec83f26e0b9309387135ca43718af4fcd6f6b1 Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Thu, 19 Sep 2024 17:40:22 +0300 Subject: [PATCH 110/116] feat(prover): Add endpoint to PJM to get queue reports (#2918) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ Add a `/queue_report` endpoint that collects queue statistics (witness generator, prover, and proof compressor jobs, per protocol version) and returns them. ## Why ❔ To supply queue data to the new autoscaler. ## Checklist - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. - [ ] Code has been formatted via `zk fmt` and `zk lint`. --------- Co-authored-by: EmilLuta --- core/lib/basic_types/src/prover_dal.rs | 3 +- .../config/src/configs/prover_job_monitor.rs | 2 + core/lib/config/src/testonly.rs | 1 + core/lib/env_config/src/prover_job_monitor.rs | 3 + .../src/proto/config/prover_job_monitor.proto | 1 + .../protobuf_config/src/prover_job_monitor.rs | 4 + etc/env/base/prover_job_monitor.toml | 1 + etc/env/file_based/general.yaml | 1 + prover/Cargo.lock | 29 ++- prover/Cargo.toml | 1 + .../crates/bin/prover_job_monitor/Cargo.toml | 3 + .../src/autoscaler_queue_reporter.rs | 176 ++++++++++++++++++ .../crates/bin/prover_job_monitor/src/lib.rs | 1 + .../crates/bin/prover_job_monitor/src/main.rs | 30 ++- .../witness_generator_queue_reporter.rs | 2 +- ...dbd694e1781e013247d090a280a1f894de464.json | 38 ++++ .../lib/prover_dal/src/fri_prover_dal.rs | 47 ++++- .../src/fri_witness_generator_dal.rs | 4 +- 18 files changed, 334 insertions(+), 13 deletions(-) create mode 100644 prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 7eb671448608..36f6c89135a0 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -2,6 +2,7 @@ use std::{net::IpAddr, ops::Add, str::FromStr}; use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString}; use crate::{ @@ -27,7 +28,7 @@ pub struct ExtendedJobCountStatistics { pub successful: usize, } -#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] pub struct JobCountStatistics { pub queued: usize, pub in_progress: usize, diff --git a/core/lib/config/src/configs/prover_job_monitor.rs b/core/lib/config/src/configs/prover_job_monitor.rs index c16b1db81b7a..d60a0e90c204 100644 ---
a/core/lib/config/src/configs/prover_job_monitor.rs +++ b/core/lib/config/src/configs/prover_job_monitor.rs @@ -61,6 +61,8 @@ pub struct ProverJobMonitorConfig { /// The interval between runs for Witness Job Queuer. #[serde(default = "ProverJobMonitorConfig::default_witness_job_queuer_run_interval_ms")] pub witness_job_queuer_run_interval_ms: u64, + /// HTTP port of the ProverJobMonitor to send requests to. + pub http_port: u16, } impl ProverJobMonitorConfig { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 4a2858b9cbfc..21141ddefff6 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -1113,6 +1113,7 @@ impl Distribution for Encod prover_queue_reporter_run_interval_ms: self.sample(rng), witness_generator_queue_reporter_run_interval_ms: self.sample(rng), witness_job_queuer_run_interval_ms: self.sample(rng), + http_port: self.sample(rng), } } } diff --git a/core/lib/env_config/src/prover_job_monitor.rs b/core/lib/env_config/src/prover_job_monitor.rs index 3a8f80473eb1..884ebecacbb8 100644 --- a/core/lib/env_config/src/prover_job_monitor.rs +++ b/core/lib/env_config/src/prover_job_monitor.rs @@ -31,6 +31,7 @@ mod tests { prover_queue_reporter_run_interval_ms: 10000, witness_generator_queue_reporter_run_interval_ms: 10000, witness_job_queuer_run_interval_ms: 10000, + http_port: 3074, } } @@ -55,6 +56,7 @@ mod tests { fn from_env_with_default() { let config = r#" PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317 + PROVER_JOB_MONITOR_HTTP_PORT=3074 PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9 "#; let mut lock = MUTEX.lock(); @@ -80,6 +82,7 @@ mod tests { PROVER_JOB_MONITOR_PROVER_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_GENERATOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_JOB_QUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_HTTP_PORT=3074 "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto index 7b505aa3bcfb..9aabf6e34832 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto @@ -17,4 +17,5 @@ message ProverJobMonitor { optional uint64 prover_queue_reporter_run_interval_ms = 12; // optional; ms optional uint64 witness_generator_queue_reporter_run_interval_ms = 13; // optional; ms optional uint64 witness_job_queuer_run_interval_ms = 14; // optional; ms + optional uint32 http_port = 15; // required; u32 } diff --git a/core/lib/protobuf_config/src/prover_job_monitor.rs b/core/lib/protobuf_config/src/prover_job_monitor.rs index a1c5a7c05995..a174d0882406 100644 --- a/core/lib/protobuf_config/src/prover_job_monitor.rs +++ b/core/lib/protobuf_config/src/prover_job_monitor.rs @@ -95,6 +95,9 @@ impl ProtoRepr for proto::ProverJobMonitor { .or_else(|| Some(Self::Type::default_witness_job_queuer_run_interval_ms())), ) .context("witness_job_queuer_run_interval_ms")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, }) } @@ -126,6 +129,7 @@ impl ProtoRepr for proto::ProverJobMonitor { this.witness_generator_queue_reporter_run_interval_ms, ), witness_job_queuer_run_interval_ms: Some(this.witness_job_queuer_run_interval_ms), + http_port: Some(this.http_port.into()), } } } diff --git a/etc/env/base/prover_job_monitor.toml b/etc/env/base/prover_job_monitor.toml index 40cdf76b8b10..ce206c74ffde 100644 --- 
a/etc/env/base/prover_job_monitor.toml +++ b/etc/env/base/prover_job_monitor.toml @@ -13,3 +13,4 @@ proof_compressor_queue_reporter_run_interval_ms = 10000 prover_queue_reporter_run_interval_ms = 10000 witness_generator_queue_reporter_run_interval_ms = 10000 witness_job_queuer_run_interval_ms = 10000 +http_port = 3074 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index ca9c3fd0c796..6a36f65c97c3 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -287,6 +287,7 @@ prover_job_monitor: prover_queue_reporter_run_interval_ms: 10000 witness_generator_queue_reporter_run_interval_ms: 10000 witness_job_queuer_run_interval_ms: 10000 + http_port: 3074 base_token_adjuster: diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 9a6c4b424232..5624403d7853 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -313,6 +313,8 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", + "hyper 1.3.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -321,10 +323,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper 1.0.1", + "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -345,6 +352,7 @@ dependencies = [ "sync_wrapper 0.1.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -5112,9 +5120,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5131,9 +5139,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -5151,6 +5159,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -7928,13 +7946,16 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "axum", "clap 4.5.4", "ctrlc", + "serde", "tokio", "tracing", "vise", "zksync_config", "zksync_core_leftovers", + "zksync_db_connection", "zksync_prover_dal", "zksync_types", "zksync_utils", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 624661adc8dc..fd171b254d5a 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -19,6 +19,7 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" +axum = "0.7.5" async-trait = "0.1" bincode = "1" chrono = "0.4.38" diff --git a/prover/crates/bin/prover_job_monitor/Cargo.toml b/prover/crates/bin/prover_job_monitor/Cargo.toml index 160d3a603e36..a4bf8765a946 100644 --- a/prover/crates/bin/prover_job_monitor/Cargo.toml +++ b/prover/crates/bin/prover_job_monitor/Cargo.toml @@ -16,6 +16,7 @@ zksync_prover_dal.workspace = true zksync_utils.workspace = true zksync_types.workspace = true zksync_config = { 
workspace = true, features = ["observability_ext"] } +zksync_db_connection.workspace = true vise.workspace = true @@ -25,3 +26,5 @@ clap = { workspace = true, features = ["derive"] } ctrlc = { workspace = true, features = ["termination"] } tracing.workspace = true async-trait.workspace = true +serde.workspace = true +axum.workspace = true diff --git a/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs new file mode 100644 index 000000000000..aff78409dbb3 --- /dev/null +++ b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs @@ -0,0 +1,176 @@ +use std::collections::HashMap; + +use axum::{ + http::StatusCode, + response::{IntoResponse, Response}, + routing::get, + Json, Router, +}; +use serde::{Deserialize, Serialize}; +use zksync_db_connection::error::DalError; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::JobCountStatistics, +}; + +#[derive(Debug, Clone)] +pub struct AutoscalerQueueReporter { + connection_pool: ConnectionPool, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct QueueReport { + pub basic_witness_jobs: JobCountStatistics, + pub leaf_witness_jobs: JobCountStatistics, + pub node_witness_jobs: JobCountStatistics, + pub recursion_tip_witness_jobs: JobCountStatistics, + pub scheduler_witness_jobs: JobCountStatistics, + pub prover_jobs: JobCountStatistics, + pub proof_compressor_jobs: JobCountStatistics, +} + +#[derive(Default, Debug, Serialize, Deserialize)] +pub struct VersionedQueueReport { + pub version: ProtocolSemanticVersion, + pub report: QueueReport, +} + +impl AutoscalerQueueReporter { + pub fn new(connection_pool: ConnectionPool) -> Self { + Self { connection_pool } + } + + pub async fn get_report(&self) -> Result>, ProcessorError> { + tracing::debug!("Received request to get queue report"); + + let mut result = HashMap::::new(); + + for round in AggregationRound::ALL_ROUNDS { + self.get_witness_jobs_report(round, &mut result).await?; + } + + self.get_prover_jobs_report(&mut result).await?; + self.get_proof_compressor_jobs_report(&mut result).await?; + + Ok(Json( + result + .into_iter() + .map(|(version, report)| VersionedQueueReport { version, report }) + .collect(), + )) + } + + async fn get_witness_jobs_report( + &self, + aggregation_round: AggregationRound, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_witness_jobs_stats(aggregation_round) + .await; + + for (protocol_version, job_stats) in stats { + let report = state.entry(protocol_version).or_default(); + + match aggregation_round { + AggregationRound::BasicCircuits => report.basic_witness_jobs = job_stats, + AggregationRound::LeafAggregation => report.leaf_witness_jobs = job_stats, + AggregationRound::NodeAggregation => report.node_witness_jobs = job_stats, + AggregationRound::RecursionTip => report.recursion_tip_witness_jobs = job_stats, + AggregationRound::Scheduler => report.scheduler_witness_jobs = job_stats, + } + } + Ok(()) + } + + async fn get_prover_jobs_report( + &self, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? 
+ .fri_prover_jobs_dal() + .get_generic_prover_jobs_stats() + .await; + + for (protocol_version, stats) in stats { + let report = state.entry(protocol_version).or_default(); + + report.prover_jobs = stats; + } + Ok(()) + } + + async fn get_proof_compressor_jobs_report( + &self, + state: &mut HashMap, + ) -> anyhow::Result<()> { + let stats = self + .connection_pool + .connection() + .await? + .fri_proof_compressor_dal() + .get_jobs_stats() + .await; + + for (protocol_version, stats) in stats { + let report = state.entry(protocol_version).or_default(); + + report.proof_compressor_jobs = stats; + } + + Ok(()) + } +} + +pub fn get_queue_reporter_router(connection_pool: ConnectionPool) -> Router { + let autoscaler_queue_reporter = AutoscalerQueueReporter::new(connection_pool); + + Router::new().route( + "/queue_report", + get(move || async move { autoscaler_queue_reporter.get_report().await }), + ) +} + +pub enum ProcessorError { + Dal(DalError), + Custom(String), +} + +impl From for ProcessorError { + fn from(err: DalError) -> Self { + ProcessorError::Dal(err) + } +} + +impl From for ProcessorError { + fn from(err: anyhow::Error) -> Self { + ProcessorError::Custom(err.to_string()) + } +} + +impl IntoResponse for ProcessorError { + fn into_response(self) -> Response { + let (status_code, message) = match self { + ProcessorError::Dal(err) => { + tracing::error!("Sqlx error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "Failed getting data from database", + ) + } + ProcessorError::Custom(err) => { + tracing::error!("Custom error invoked: {:?}", &err); + (StatusCode::INTERNAL_SERVER_ERROR, "Internal error") + } + }; + (status_code, message).into_response() + } +} diff --git a/prover/crates/bin/prover_job_monitor/src/lib.rs b/prover/crates/bin/prover_job_monitor/src/lib.rs index 60d8be297cfe..0d6a0ebe104c 100644 --- a/prover/crates/bin/prover_job_monitor/src/lib.rs +++ b/prover/crates/bin/prover_job_monitor/src/lib.rs @@ -1,4 +1,5 @@ pub mod archiver; +pub mod autoscaler_queue_reporter; pub mod job_requeuer; pub(crate) mod metrics; pub mod queue_reporter; diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs index 734a4bac38a2..9195b92882dd 100644 --- a/prover/crates/bin/prover_job_monitor/src/main.rs +++ b/prover/crates/bin/prover_job_monitor/src/main.rs @@ -1,3 +1,5 @@ +use std::{future::IntoFuture, net::SocketAddr}; + use anyhow::Context as _; use clap::Parser; use tokio::{ @@ -12,6 +14,7 @@ use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_gener use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_job_monitor::{ archiver::{GpuProverArchiver, ProverJobsArchiver}, + autoscaler_queue_reporter::get_queue_reporter_router, job_requeuer::{ProofCompressorJobRequeuer, ProverJobRequeuer, WitnessGeneratorJobRequeuer}, queue_reporter::{ ProofCompressorQueueReporter, ProverQueueReporter, WitnessGeneratorQueueReporter, @@ -85,21 +88,42 @@ async fn main() -> anyhow::Result<()> { let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver.clone()))]; tasks.extend(get_tasks( - connection_pool, - prover_job_monitor_config, + connection_pool.clone(), + prover_job_monitor_config.clone(), proof_compressor_config, prover_config, witness_generator_config, prover_group_config, - stop_receiver, + stop_receiver.clone(), )?); let mut tasks = ManagedTasks::new(tasks); + let bind_address = SocketAddr::from(([0, 0, 0, 0], prover_job_monitor_config.http_port)); + + tracing::info!("Starting PJM server on 
{bind_address}"); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding PJM server to {bind_address}"))?; + + let mut receiver = stop_receiver.clone(); + let app = axum::serve(listener, get_queue_reporter_router(connection_pool)) + .with_graceful_shutdown(async move { + if receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for PJM server was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, PJM server is shutting down"); + }) + .into_future(); + tokio::select! { _ = tasks.wait_single() => {}, _ = stop_signal_receiver => { tracing::info!("Stop signal received, shutting down"); } + _ = app => {} } stop_sender.send(true).ok(); tasks.complete(graceful_shutdown_timeout).await; diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs index 5f507a753649..914f2e9ca856 100644 --- a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs +++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs @@ -58,7 +58,7 @@ impl Task for WitnessGeneratorQueueReporter { .fri_witness_generator_dal() .get_witness_jobs_stats(round) .await; - for ((round, semantic_protocol_version), job_stats) in stats { + for (semantic_protocol_version, job_stats) in stats { Self::emit_metrics_for_round(round, semantic_protocol_version, &job_stats); } } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json b/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json new file mode 100644 index 000000000000..ce9e492a7d4a --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (WHERE status = 'queued') as queued,\n COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "protocol_version_patch!", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "queued", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "in_progress", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false, + null, + null + ] + }, + "hash": "97adb49780c9edde6a3cfda09dadbd694e1781e013247d090a280a1f894de464" +} diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 4e68154290da..1b6c43f4c177 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -6,8 +6,10 @@ use zksync_basic_types::{ AggregationRound, CircuitIdRoundTuple, CircuitProverStatsEntry, ProtocolVersionedCircuitProverStats, }, - protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, - prover_dal::{FriProverJobMetadata, ProverJobFriInfo, ProverJobStatus, StuckJobs}, + 
protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + prover_dal::{ + FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs, + }, L1BatchNumber, }; use zksync_db_connection::{ @@ -445,6 +447,47 @@ impl FriProverDal<'_, '_> { } } + pub async fn get_generic_prover_jobs_stats( + &mut self, + ) -> HashMap { + { + sqlx::query!( + r#" + SELECT + protocol_version AS "protocol_version!", + protocol_version_patch AS "protocol_version_patch!", + COUNT(*) FILTER (WHERE status = 'queued') as queued, + COUNT(*) FILTER (WHERE status = 'in_progress') as in_progress + FROM + prover_jobs_fri + WHERE + status IN ('queued', 'in_progress') + AND protocol_version IS NOT NULL + GROUP BY + protocol_version, + protocol_version_patch + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let protocol_semantic_version = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(), + VersionPatch(row.protocol_version_patch as u32), + ); + let key = protocol_semantic_version; + let value = JobCountStatistics { + queued: row.queued.unwrap() as usize, + in_progress: row.in_progress.unwrap() as usize, + }; + (key, value) + }) + .collect() + } + } + pub async fn min_unproved_l1_batch_number(&mut self) -> HashMap<(u8, u8), L1BatchNumber> { { sqlx::query!( diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 9958527a98b0..791038be0bb8 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1378,7 +1378,7 @@ impl FriWitnessGeneratorDal<'_, '_> { pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, - ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { + ) -> HashMap { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" @@ -1407,7 +1407,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(), VersionPatch(row.get::("protocol_version_patch") as u32), ); - let key = (aggregation_round, protocol_semantic_version); + let key = protocol_semantic_version; let value = JobCountStatistics { queued: row.get::("queued") as usize, in_progress: row.get::("in_progress") as usize, From 38fc824f75e8b0e84f10348d1502fc8a26d12015 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Tomasz=20Grze=C5=9Bkiewicz?= Date: Thu, 19 Sep 2024 18:47:00 +0200 Subject: [PATCH 111/116] fix(eth-sender): print better error message in case of missing blob prices (#2927) Signed-off-by: tomg10 Co-authored-by: perekopskiy --- core/node/eth_sender/src/eth_fees_oracle.rs | 14 +++++++++++++- core/node/eth_sender/src/tester.rs | 14 +++++++------- .../src/l1_gas_price/gas_adjuster/tests.rs | 2 +- zk_toolbox/Cargo.lock | 2 ++ 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 271a33d49c32..0c708fee3698 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -35,13 +35,24 @@ pub(crate) struct GasAdjusterFeesOracle { } impl GasAdjusterFeesOracle { + fn assert_fee_is_not_zero(&self, value: u64, fee_type: &'static str) { + if value == 0 { + panic!( + "L1 RPC incorrectly reported {fee_type} prices, either it doesn't return them at \ + all or returns 0's, eth-sender cannot continue without proper {fee_type} prices!" 
+ ); + } + } fn calculate_fees_with_blob_sidecar( &self, previous_sent_tx: &Option, ) -> Result { let base_fee_per_gas = self.gas_adjuster.get_blob_tx_base_fee(); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); let priority_fee_per_gas = self.gas_adjuster.get_blob_tx_priority_fee(); - let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_tx_blob_base_fee()); + let blob_base_fee_per_gas = self.gas_adjuster.get_blob_tx_blob_base_fee(); + self.assert_fee_is_not_zero(blob_base_fee_per_gas, "blob"); + let blob_base_fee_per_gas = Some(blob_base_fee_per_gas); if let Some(previous_sent_tx) = previous_sent_tx { // for blob transactions on re-sending need to double all gas prices @@ -72,6 +83,7 @@ impl GasAdjusterFeesOracle { time_in_mempool: u32, ) -> Result { let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( previous_sent_tx.id, diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index c6d993a9c97f..9be1384daae2 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -153,7 +153,7 @@ impl EthSenderTester { .into_iter() .map(|base_fee_per_gas| BaseFees { base_fee_per_gas, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .collect(); @@ -161,8 +161,8 @@ impl EthSenderTester { let gateway = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -181,8 +181,8 @@ impl EthSenderTester { let l2_gateway: MockSettlementLayer = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -201,8 +201,8 @@ impl EthSenderTester { let gateway_blobs = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 2643e4b3c424..47023203de0e 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -152,7 +152,7 @@ async fn kept_updated_l2(commitment_mode: L1BatchCommitmentMode) { .zip(TEST_PUBDATA_PRICES) .map(|(block, pubdata)| BaseFees { base_fee_per_gas: block, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: pubdata.into(), }) .collect(); diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index fd524865d567..296037094529 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -4607,6 +4607,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ + "hex", "serde", "serde_with_macros", ] @@ -6710,6 +6711,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + 
"serde_with", "strum", "thiserror", "tracing", From e9d1d905f1ce86f9de2cf39d79be4b5aada4a81d Mon Sep 17 00:00:00 2001 From: Grzegorz Prusak Date: Fri, 20 Sep 2024 03:28:24 +0200 Subject: [PATCH 112/116] feat: added seed_peers to consensus global config (#2920) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It will allow us to announce the recommended default list of peers to all ENs without manual intervention. Fixes BFT-509. --------- Co-authored-by: Bruno França --- core/lib/config/src/configs/consensus.rs | 2 + core/lib/config/src/testonly.rs | 8 +++- core/lib/dal/Cargo.toml | 1 + core/lib/dal/src/consensus/mod.rs | 34 +++++++++++++- core/lib/dal/src/consensus/proto/mod.proto | 7 +++ core/lib/dal/src/consensus_dal.rs | 4 ++ core/lib/protobuf_config/src/consensus.rs | 44 ++++++++++++++----- .../src/proto/core/consensus.proto | 5 ++- core/node/consensus/src/config.rs | 28 +++++++++++- core/node/consensus/src/en.rs | 3 +- core/node/consensus/src/mn.rs | 4 +- core/node/consensus/src/storage/connection.rs | 1 + core/node/consensus/src/testonly.rs | 18 +++++--- core/node/consensus/src/tests/attestation.rs | 1 + core/node/consensus/src/tests/mod.rs | 1 + prover/Cargo.lock | 1 + .../zk_inception/src/utils/consensus.rs | 1 + 17 files changed, 136 insertions(+), 27 deletions(-) diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 759e13128338..2e277341b07d 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -92,6 +92,8 @@ pub struct GenesisSpec { pub leader: ValidatorPublicKey, /// Address of the registry contract. pub registry_address: Option, + /// Recommended list of peers to connect to. + pub seed_peers: BTreeMap, } #[derive(Clone, Debug, PartialEq, Default)] diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index 21141ddefff6..5a5a54304425 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -774,7 +774,9 @@ impl Distribution for EncodeDist { impl Distribution for EncodeDist { fn sample(&self, rng: &mut R) -> configs::consensus::GenesisSpec { - use configs::consensus::{GenesisSpec, ProtocolVersion, ValidatorPublicKey}; + use configs::consensus::{ + GenesisSpec, Host, NodePublicKey, ProtocolVersion, ValidatorPublicKey, + }; GenesisSpec { chain_id: L2ChainId::default(), protocol_version: ProtocolVersion(self.sample(rng)), @@ -782,6 +784,10 @@ impl Distribution for EncodeDist { attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), registry_address: self.sample_opt(|| rng.gen()), + seed_peers: self + .sample_range(rng) + .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng)))) + .collect(), } } } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 9c13eeb30147..ccca49525e40 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -19,6 +19,7 @@ zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true zksync_types.workspace = true +zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true zksync_protobuf.workspace = true diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index f54938e8ec1a..f01655d56a95 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -5,8 +5,11 @@ mod testonly; #[cfg(test)] mod tests; +use std::collections::BTreeMap; + use 
anyhow::{anyhow, Context as _}; -use zksync_consensus_roles::{attester, validator}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node, validator}; use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ abi, ethabi, @@ -27,6 +30,23 @@ use crate::models::{parse_h160, parse_h256}; pub struct GlobalConfig { pub genesis: validator::Genesis, pub registry_address: Option, + pub seed_peers: BTreeMap, +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } } impl ProtoFmt for GlobalConfig { @@ -41,6 +61,13 @@ impl ProtoFmt for GlobalConfig { .map(|a| parse_h160(a)) .transpose() .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, }) } @@ -48,6 +75,11 @@ impl ProtoFmt for GlobalConfig { Self::Proto { genesis: Some(self.genesis.build()), registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), } } } diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 3ea49e9c0cd6..ab1245f3ef6a 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -4,6 +4,7 @@ package zksync.dal; import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; +import "zksync/roles/node.proto"; message Payload { // zksync-era ProtocolVersionId @@ -122,9 +123,15 @@ message AttesterCommittee { repeated roles.attester.WeightedAttester members = 1; // required } +message NodeAddr { + optional roles.node.PublicKey key = 1; // required + optional string addr = 2; // required; Host +} + message GlobalConfig { optional roles.validator.Genesis genesis = 1; // required optional bytes registry_address = 2; // optional; H160 + repeated NodeAddr seed_peers = 3; } message AttestationStatus { diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 2dca58e2a6a6..711ce3ddf392 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -66,6 +66,7 @@ impl ConsensusDal<'_, '_> { return Ok(Some(GlobalConfig { genesis, registry_address: None, + seed_peers: [].into(), })); } Ok(None) @@ -184,6 +185,7 @@ impl ConsensusDal<'_, '_> { } .with_hash(), registry_address: old.registry_address, + seed_peers: old.seed_peers, }; txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; @@ -681,6 +683,7 @@ mod tests { let cfg = GlobalConfig { genesis: genesis.with_hash(), registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host }; conn.consensus_dal() .try_update_global_config(&cfg) @@ -715,6 +718,7 @@ mod tests { let cfg = GlobalConfig { genesis: setup.genesis.clone(), registry_address: Some(rng.gen()), + seed_peers: [].into(), }; conn.consensus_dal() .try_update_global_config(&cfg) diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index f5eb5c5b2f10..a5b552dffc4a 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -71,6 +71,13 @@ impl 
ProtoRepr for proto::GenesisSpec { .map(|x| parse_h160(x)) .transpose() .context("registry_address")?, + seed_peers: self + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, }) } fn build(this: &Self::Type) -> Self { @@ -81,6 +88,11 @@ impl ProtoRepr for proto::GenesisSpec { attesters: this.attesters.iter().map(ProtoRepr::build).collect(), leader: Some(this.leader.0.clone()), registry_address: this.registry_address.map(|a| format!("{:?}", a)), + seed_peers: this + .seed_peers + .iter() + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) + .collect(), } } } @@ -99,15 +111,25 @@ impl ProtoRepr for proto::RpcConfig { } } +impl ProtoRepr for proto::NodeAddr { + type Type = (NodePublicKey, Host); + fn read(&self) -> anyhow::Result { + Ok(( + NodePublicKey(required(&self.key).context("key")?.clone()), + Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0 .0.clone()), + addr: Some(this.1 .0.clone()), + } + } +} + impl ProtoRepr for proto::Config { type Type = ConsensusConfig; fn read(&self) -> anyhow::Result { - let read_addr = |e: &proto::NodeAddr| { - let key = NodePublicKey(required(&e.key).context("key")?.clone()); - let addr = Host(required(&e.addr).context("addr")?.clone()); - anyhow::Ok((key, addr)) - }; - let max_payload_size = required(&self.max_payload_size) .and_then(|x| Ok((*x).try_into()?)) .context("max_payload_size")?; @@ -144,8 +166,9 @@ impl ProtoRepr for proto::Config { .gossip_static_outbound .iter() .enumerate() - .map(|(i, e)| read_addr(e).context(i)) - .collect::>()?, + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("gossip_static_outbound")?, genesis_spec: read_optional_repr(&self.genesis_spec), rpc: read_optional_repr(&self.rpc_config), }) @@ -168,10 +191,7 @@ impl ProtoRepr for proto::Config { gossip_static_outbound: this .gossip_static_outbound .iter() - .map(|x| proto::NodeAddr { - key: Some(x.0 .0.clone()), - addr: Some(x.1 .0.clone()), - }) + .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone()))) .collect(), genesis_spec: this.genesis_spec.as_ref().map(ProtoRepr::build), rpc_config: this.rpc.as_ref().map(ProtoRepr::build), diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 835ead1ab65c..6cabc45fc12a 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -31,10 +31,10 @@ package zksync.core.consensus; import "zksync/std.proto"; -// (public key, ip address) of a gossip network node. +// (public key, host address) of a gossip network node. message NodeAddr { optional string key = 1; // required; NodePublicKey - optional string addr = 2; // required; IpAddr + optional string addr = 2; // required; Host } // Weighted member of a validator committee. @@ -58,6 +58,7 @@ message GenesisSpec { repeated WeightedAttester attesters = 5; // can be empty; attester committee. // Currently not in consensus genesis, but still a part of the global configuration. optional string registry_address = 6; // optional; H160 + repeated NodeAddr seed_peers = 7; } // Per peer connection RPC rate limits. diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 22f8fc01192f..cada58b0756f 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -1,5 +1,5 @@ //! 
Configuration utilities for the consensus component. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use anyhow::Context as _; use secrecy::{ExposeSecret as _, Secret}; @@ -44,6 +44,7 @@ pub(super) struct GenesisSpec { pub(super) attesters: Option, pub(super) leader_selection: validator::LeaderSelectionMode, pub(super) registry_address: Option, + pub(super) seed_peers: BTreeMap, } impl GenesisSpec { @@ -55,6 +56,7 @@ impl GenesisSpec { attesters: cfg.genesis.attesters.clone(), leader_selection: cfg.genesis.leader_selection.clone(), registry_address: cfg.registry_address, + seed_peers: cfg.seed_peers.clone(), } } @@ -98,6 +100,19 @@ impl GenesisSpec { Some(attester::Committee::new(attesters).context("attesters")?) }, registry_address: x.registry_address, + seed_peers: x + .seed_peers + .iter() + .map(|(key, addr)| { + anyhow::Ok(( + Text::new(&key.0) + .decode::() + .context("key")?, + net::Host(addr.0.clone()), + )) + }) + .collect::>() + .context("seed_peers")?, }) } } @@ -109,9 +124,18 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { - let mut gossip_static_outbound = HashMap::new(); + // Always connect to seed peers. + // Once we implement dynamic peer discovery, + // we won't establish a persistent connection to seed peers + // but rather just ask them for more peers. + let mut gossip_static_outbound: HashMap<_, _> = global_config + .seed_peers + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); { let mut append = |key: &NodePublicKey, addr: &Host| { gossip_static_outbound.insert( diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index a52393c0f488..0b78662f8c25 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -125,7 +125,7 @@ impl EN { )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, build_version)?, + config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -304,6 +304,7 @@ impl EN { Ok(consensus_dal::GlobalConfig { genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, registry_address: None, + seed_peers: [].into(), }) } diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 4d428346ebe4..f80bfe58954c 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -76,12 +76,12 @@ pub async fn run_main_node( s.spawn_bg(run_attestation_controller( ctx, &pool, - global_config, + global_config.clone(), attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, None)?, + config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, batch_store, validator: Some(executor::Validator { diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 512b37e81a11..2c297eed7275 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -317,6 +317,7 @@ impl<'a> Connection<'a> { } .with_hash(), registry_address: spec.registry_address, + seed_peers: spec.seed_peers.clone(), }; txn.try_update_global_config(ctx, &new) diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 241998f26928..bcd22186a4bc 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -91,11 +91,8 @@ impl ConfigSet { } } -pub(super) fn new_configs( - rng: 
&mut impl Rng, - setup: &Setup, - gossip_peers: usize, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { + let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), @@ -117,8 +114,17 @@ pub(super) fn new_configs( .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), registry_address: None, + seed_peers: net_cfgs[..seed_peers] + .iter() + .map(|c| { + ( + config::NodePublicKey(c.gossip.key.public().encode()), + config::Host(c.public_addr.0.clone()), + ) + }) + .collect(), }; - network::testonly::new_configs(rng, setup, gossip_peers) + net_cfgs .into_iter() .enumerate() .map(|(i, net)| ConfigSet { diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index abd35508c7f7..e783dbecdc35 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -47,6 +47,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 91f01f865a2b..aabfff462a81 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -49,6 +49,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 5624403d7853..b943de65ce5f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7551,6 +7551,7 @@ dependencies = [ "tokio", "tracing", "vise", + "zksync_concurrency", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs index 06848334a6e1..0b24bbe5cdc6 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs @@ -95,6 +95,7 @@ pub fn get_genesis_specs( attesters: vec![attester], leader, registry_address: None, + seed_peers: [].into(), } } From 7aa5721d22e253d05d369a60d5bcacbf52021c48 Mon Sep 17 00:00:00 2001 From: Alex Ostrovski Date: Fri, 20 Sep 2024 12:19:19 +0300 Subject: [PATCH 113/116] feat(vm): Do not panic on VM divergence (#2705) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ - Allows to continue batch execution on divergence via new `ShadowLenient` VM mode. - Dumps VM state to logs and optionally a file on divergence. ## Why ❔ Allows to detect divergencies in multiple batches w/o blockers. The dumped VM state will hopefully allow investigating divergencies locally, although this logic isn't implemented yet. ## Checklist - [x] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [x] Tests for the changes have been added / updated. - [x] Documentation comments have been added / updated. - [x] Code has been formatted via `zk fmt` and `zk lint`. 
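For reviewers, a rough sketch of how the new divergence handling is meant to be wired into a batch executor. Only `set_divergence_handler` and `DivergenceHandler` appear in this diff; the `set_fast_vm_mode` setter, the factory constructor arguments, and JSON serialization of `VmDump` are assumptions based on the surrounding API, so treat this as illustrative rather than definitive:

```rust
use zksync_basic_types::vm::FastVmMode;
use zksync_multivm::interface::utils::DivergenceHandler;
use zksync_vm_executor::batch::MainBatchExecutorFactory;

fn shadowed_executor_factory() -> MainBatchExecutorFactory {
    // Run the fast VM in shadow mode, i.e. alongside the reference VM.
    let mut factory = MainBatchExecutorFactory::new(
        /* save_call_traces */ false,
        /* optional_bytecode_compression */ true,
    );
    factory.set_fast_vm_mode(FastVmMode::Shadow);

    // Instead of panicking on a divergence, log it and persist a VM dump
    // so the diverging batch can be replayed and investigated locally.
    factory.set_divergence_handler(DivergenceHandler::new(|errors, dump| {
        tracing::error!("VM execution diverged: {errors}");
        match serde_json::to_string(&dump) {
            Ok(json) => {
                if let Err(err) = std::fs::write("vm_divergence_dump.json", json) {
                    tracing::error!("failed saving VM dump: {err}");
                }
            }
            Err(err) => tracing::error!("failed serializing VM dump: {err}"),
        }
    }));
    factory
}
```

With a handler installed, the shadowed VM reports divergences through it instead of panicking, so subsequent batches keep executing.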
---
 Cargo.lock | 2 +
 core/lib/basic_types/src/vm.rs | 3 +-
 core/lib/multivm/Cargo.toml | 2 +-
 core/lib/multivm/src/lib.rs | 2 +-
 core/lib/multivm/src/versions/mod.rs | 5 +-
 core/lib/multivm/src/versions/shadow.rs | 305 -----------
 core/lib/multivm/src/versions/testonly.rs | 93 ++++
 core/lib/multivm/src/versions/tests.rs | 276 ++++++++++
 .../src/versions/vm_fast/tests/block_tip.rs | 3 +-
 .../src/versions/vm_fast/tests/code_oracle.rs | 10 +-
 .../vm_fast/tests/get_used_contracts.rs | 6 +-
 .../src/versions/vm_fast/tests/l2_blocks.rs | 6 +-
 .../versions/vm_fast/tests/nonce_holder.rs | 4 +-
 .../src/versions/vm_fast/tests/precompiles.rs | 5 +-
 .../src/versions/vm_fast/tests/refunds.rs | 4 +-
 .../versions/vm_fast/tests/require_eip712.rs | 6 +-
 .../src/versions/vm_fast/tests/storage.rs | 3 +-
 .../src/versions/vm_fast/tests/tester/mod.rs | 2 +-
 .../vm_fast/tests/tester/vm_tester.rs | 101 +---
 .../vm_fast/tests/tracing_execution_error.rs | 6 +-
 .../src/versions/vm_fast/tests/transfer.rs | 25 +-
 core/lib/multivm/src/versions/vm_fast/vm.rs | 152 +++---
 core/lib/multivm/src/versions/vm_latest/vm.rs | 14 +-
 core/lib/multivm/src/vm_instance.rs | 19 +-
 core/lib/object_store/src/file.rs | 1 +
 core/lib/object_store/src/objects.rs | 1 -
 core/lib/object_store/src/raw.rs | 2 +
 core/lib/vm_executor/src/batch/factory.rs | 16 +
 core/lib/vm_interface/Cargo.toml | 1 +
 core/lib/vm_interface/src/lib.rs | 3 +-
 core/lib/vm_interface/src/storage/snapshot.rs | 32 +-
 core/lib/vm_interface/src/utils/dump.rs | 249 +++++++++
 core/lib/vm_interface/src/utils/mod.rs | 9 +
 core/lib/vm_interface/src/utils/shadow.rs | 475 ++++++++++++++++++
 core/lib/vm_interface/src/vm.rs | 8 +-
 .../layers/vm_runner/playground.rs | 4 +
 core/node/vm_runner/Cargo.toml | 1 +
 core/node/vm_runner/src/impls/playground.rs | 45 +-
 core/node/vm_runner/src/tests/playground.rs | 2 +
 core/tests/vm-benchmark/src/vm.rs | 2 +-
 docs/guides/external-node/00_quick_start.md | 4 +-
 prover/Cargo.lock | 2 +-
 42 files changed, 1398 insertions(+), 513 deletions(-)
 delete mode 100644 core/lib/multivm/src/versions/shadow.rs
 create mode 100644 core/lib/multivm/src/versions/testonly.rs
 create mode 100644 core/lib/multivm/src/versions/tests.rs
 create mode 100644 core/lib/vm_interface/src/utils/dump.rs
 create mode 100644 core/lib/vm_interface/src/utils/mod.rs
 create mode 100644 core/lib/vm_interface/src/utils/shadow.rs

diff --git a/Cargo.lock b/Cargo.lock
index 8d062ebb361e..8164d412af55 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -11008,6 +11008,7 @@ dependencies = [
  "assert_matches",
  "async-trait",
  "hex",
+ "pretty_assertions",
  "serde",
  "serde_json",
  "thiserror",
@@ -11030,6 +11031,7 @@ dependencies = [
  "once_cell",
  "rand 0.8.5",
  "serde",
+ "serde_json",
  "tempfile",
  "test-casing",
  "tokio",
diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs
index c178c853b2dc..c753bbfc8183 100644
--- a/core/lib/basic_types/src/vm.rs
+++ b/core/lib/basic_types/src/vm.rs
@@ -32,8 +32,9 @@ pub enum FastVmMode {
     /// Run only the old VM.
     #[default]
     Old,
-    /// Run only the new Vm.
+    /// Run only the new VM.
     New,
     /// Run both the new and old VM and compare their outputs for each transaction execution.
+    /// The VM will panic on divergence.
Shadow, } diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 5e76c10f53e7..2c2cd4f044b9 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -34,13 +34,13 @@ anyhow.workspace = true hex.workspace = true itertools.workspace = true once_cell.workspace = true -pretty_assertions.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true [dev-dependencies] assert_matches.workspace = true +pretty_assertions.workspace = true tokio = { workspace = true, features = ["time"] } zksync_test_account.workspace = true ethabi.workspace = true diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 77851a1df002..be740d6b3780 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -22,5 +22,5 @@ pub use crate::{ mod glue; pub mod tracers; pub mod utils; -pub mod versions; +mod versions; mod vm_instance; diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index 81358a482f1a..bcb246cece46 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,5 +1,8 @@ -pub mod shadow; mod shared; +#[cfg(test)] +mod testonly; +#[cfg(test)] +mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs deleted file mode 100644 index 871258f43b85..000000000000 --- a/core/lib/multivm/src/versions/shadow.rs +++ /dev/null @@ -1,305 +0,0 @@ -use std::{ - collections::{BTreeMap, HashSet}, - fmt, -}; - -use anyhow::Context as _; -use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; - -use crate::{ - interface::{ - storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, - }, - vm_fast, -}; - -#[derive(Debug)] -pub struct ShadowVm { - main: T, - shadow: vm_fast::Vm>, -} - -impl VmFactory> for ShadowVm -where - S: ReadStorage, - T: VmFactory>, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - Self { - main: T::new(batch_env.clone(), system_env.clone(), storage.clone()), - shadow: vm_fast::Vm::new(batch_env, system_env, ImmutableStorageView::new(storage)), - } - } -} - -impl VmInterface for ShadowVm -where - S: ReadStorage, - T: VmInterface, -{ - type TracerDispatcher = T::TracerDispatcher; - - fn push_transaction(&mut self, tx: Transaction) { - self.shadow.push_transaction(tx.clone()); - self.main.push_transaction(tx); - } - - fn inspect( - &mut self, - dispatcher: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - let shadow_result = self.shadow.inspect((), execution_mode); - let main_result = self.main.inspect(dispatcher, execution_mode); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result, &shadow_result); - errors - .into_result() - .with_context(|| format!("executing VM with mode {execution_mode:?}")) - .unwrap(); - main_result - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.shadow.start_new_l2_block(l2_block_env); - self.main.start_new_l2_block(l2_block_env); - } - - fn inspect_transaction_with_bytecode_compression( - &mut self, - tracer: Self::TracerDispatcher, - tx: Transaction, - with_compression: bool, - ) 
-> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); - let main_result = self.main.inspect_transaction_with_bytecode_compression( - tracer, - tx.clone(), - with_compression, - ); - let shadow_result = - self.shadow - .inspect_transaction_with_bytecode_compression((), tx, with_compression); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result.1, &shadow_result.1); - errors - .into_result() - .with_context(|| { - format!("inspecting transaction {tx_hash:?}, with_compression={with_compression:?}") - }) - .unwrap(); - main_result - } - - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); - let shadow_batch = self.shadow.finish_batch(); - - let mut errors = DivergenceErrors::default(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - errors.into_result().unwrap(); - main_batch - } -} - -#[must_use = "Should be converted to a `Result`"] -#[derive(Debug, Default)] -pub struct DivergenceErrors(Vec); - -impl DivergenceErrors { - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - } - - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { - if main != shadow { - let comparison = pretty_assertions::Comparison::new(main, shadow); - let err = anyhow::anyhow!("`{context}` mismatch: {comparison}"); - self.0.push(err); - } - } - - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - 
"final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { - logs.iter() - .filter(|log| log.is_write()) - .map(|log| (log.key, log)) - .collect() - } - - fn into_result(self) -> anyhow::Result<()> { - if self.0.is_empty() { - Ok(()) - } else { - Err(anyhow::anyhow!( - "divergence between old VM and new VM execution: [{:?}]", - self.0 - )) - } - } -} - -// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them -// inside the VM, hence this auxiliary struct. -#[derive(PartialEq)] -struct UniqueStorageLogs(BTreeMap); - -impl fmt::Debug for UniqueStorageLogs { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut map = formatter.debug_map(); - for log in self.0.values() { - map.entry( - &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), - &format!("{:?} -> {:?}", log.previous_value, log.log.value), - ); - } - map.finish() - } -} - -impl UniqueStorageLogs { - fn new(logs: &[StorageLogWithPreviousValue]) -> Self { - let mut unique_logs = BTreeMap::::new(); - for log in logs { - if !log.log.is_write() { - continue; - } - if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { - existing_log.log.value = log.log.value; - } else { - unique_logs.insert(log.log.key, *log); - } - } - - // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. 
- unique_logs.retain(|_, log| log.previous_value != log.log.value); - Self(unique_logs) - } -} - -impl VmInterfaceHistoryEnabled for ShadowVm -where - S: ReadStorage, - T: VmInterfaceHistoryEnabled, -{ - fn make_snapshot(&mut self) { - self.shadow.make_snapshot(); - self.main.make_snapshot(); - } - - fn rollback_to_the_latest_snapshot(&mut self) { - self.shadow.rollback_to_the_latest_snapshot(); - self.main.rollback_to_the_latest_snapshot(); - } - - fn pop_snapshot_no_rollback(&mut self) { - self.shadow.pop_snapshot_no_rollback(); - self.main.pop_snapshot_no_rollback(); - } -} diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs new file mode 100644 index 000000000000..51a4d0842d90 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly.rs @@ -0,0 +1,93 @@ +use zksync_contracts::BaseSystemContracts; +use zksync_test_account::Account; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, + helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + let timestamp = unix_timestamp_ms(); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { + let key = storage_key_for_eth_balance(&account.address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + } + } + + pub fn account(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: true, + } + } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); + } + + /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
+ pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + for contract in contracts { + contract.insert(storage); + } + } +} diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/tests.rs new file mode 100644 index 000000000000..96a85c86816e --- /dev/null +++ b/core/lib/multivm/src/versions/tests.rs @@ -0,0 +1,276 @@ +//! Shadow VM tests. Since there are no real VM implementations in the `vm_interface` crate where `ShadowVm` is defined, +//! these tests are placed here. + +use assert_matches::assert_matches; +use ethabi::Contract; +use zksync_contracts::{ + get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, +}; +use zksync_test_account::{Account, TxType}; +use zksync_types::{ + block::L2BlockHasher, fee::Fee, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, + ProtocolVersionId, StorageKey, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +use crate::{ + interface::{ + storage::{InMemoryStorage, ReadStorage, StorageView}, + utils::{ShadowVm, VmDump}, + ExecutionResult, L1BatchEnv, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, + }, + utils::get_max_gas_per_pubdata_byte, + versions::testonly::{ + default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + }, + vm_fast, + vm_latest::{self, HistoryEnabled}, +}; + +type ReferenceVm = vm_latest::Vm, HistoryEnabled>; +type ShadowedVmFast = crate::vm_instance::ShadowedVmFast; + +fn hash_block(block_env: L2BlockEnv, tx_hashes: &[H256]) -> H256 { + let mut hasher = L2BlockHasher::new( + L2BlockNumber(block_env.number), + block_env.timestamp, + block_env.prev_block_hash, + ); + for &tx_hash in tx_hashes { + hasher.push_tx_hash(tx_hash); + } + hasher.finalize(ProtocolVersionId::latest()) +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +#[derive(Debug)] +struct Harness { + alice: Account, + bob: Account, + storage_contract: ContractToDeploy, + storage_contract_abi: Contract, + current_block: L2BlockEnv, +} + +impl Harness { + const STORAGE_CONTRACT_PATH: &'static str = + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json"; + const STORAGE_CONTRACT_ADDRESS: Address = Address::repeat_byte(23); + + fn new(l1_batch_env: &L1BatchEnv) -> Self { + Self { + alice: Account::random(), + bob: Account::random(), + storage_contract: ContractToDeploy::new( + read_bytecode(Self::STORAGE_CONTRACT_PATH), + Self::STORAGE_CONTRACT_ADDRESS, + ), + storage_contract_abi: load_contract(Self::STORAGE_CONTRACT_PATH), + current_block: l1_batch_env.first_l2_block, + } + } + + fn setup_storage(&self, storage: &mut InMemoryStorage) { + make_account_rich(storage, &self.alice); + make_account_rich(storage, &self.bob); + + self.storage_contract.insert(storage); + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + storage.set_value_hashed_enum( + storage_contract_key.hashed_key(), + 999, + H256::from_low_u64_be(42), + ); + } + + fn assert_dump(dump: &mut VmDump) { + assert_eq!(dump.l1_batch_number(), L1BatchNumber(1)); + let tx_counts_per_block: Vec<_> = + dump.l2_blocks.iter().map(|block| block.txs.len()).collect(); + assert_eq!(tx_counts_per_block, [1, 2, 2, 0]); + + let storage_contract_key = 
StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + let value = dump.storage.read_value(&storage_contract_key); + assert_eq!(value, H256::from_low_u64_be(42)); + let enum_index = dump.storage.get_enumeration_index(&storage_contract_key); + assert_eq!(enum_index, Some(999)); + } + + fn new_block(&mut self, vm: &mut impl VmInterface, tx_hashes: &[H256]) { + self.current_block = L2BlockEnv { + number: self.current_block.number + 1, + timestamp: self.current_block.timestamp + 1, + prev_block_hash: hash_block(self.current_block, tx_hashes), + max_virtual_blocks_to_create: self.current_block.max_virtual_blocks_to_create, + }; + vm.start_new_l2_block(self.current_block); + } + + fn execute_on_vm(&mut self, vm: &mut impl VmInterface) { + let transfer_exec = Execute { + contract_address: Some(self.bob.address()), + calldata: vec![], + value: 1_000_000_000.into(), + factory_deps: vec![], + }; + let transfer_to_bob = self + .alice + .get_l2_tx_for_execute(transfer_exec.clone(), None); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(transfer_to_bob.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[transfer_to_bob.hash()]); + + let out_of_gas_transfer = self.bob.get_l2_tx_for_execute( + transfer_exec.clone(), + Some(tx_fee(200_000)), // high enough to pass validation + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(out_of_gas_transfer.clone(), true); + compression_result.unwrap(); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + + let write_fn = self.storage_contract_abi.function("simpleWrite").unwrap(); + let simple_write_tx = self.alice.get_l2_tx_for_execute( + Execute { + contract_address: Some(Self::STORAGE_CONTRACT_ADDRESS), + calldata: write_fn.encode_input(&[]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(simple_write_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + let storage_logs = &exec_result.logs.storage_logs; + assert!(storage_logs.iter().any(|log| { + log.log.key == storage_contract_key && log.previous_value == H256::from_low_u64_be(42) + })); + + self.new_block(vm, &[out_of_gas_transfer.hash(), simple_write_tx.hash()]); + + let deploy_tx = self.alice.get_deploy_tx( + &get_loadnext_contract().bytecode, + Some(&[ethabi::Token::Uint(100.into())]), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(deploy_tx.tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let load_test_tx = self.bob.get_loadnext_transaction( + deploy_tx.address, + LoadnextContractExecutionParams::default(), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(load_test_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); + vm.finish_batch(); + } +} + +fn sanity_check_vm() -> (Vm, Harness) +where + Vm: VmFactory>, +{ + let system_env = default_system_env(); + 
let l1_batch_env = default_l1_batch(L1BatchNumber(1)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut harness = Harness::new(&l1_batch_env); + harness.setup_storage(&mut storage); + + let storage = StorageView::new(storage).to_rc_ptr(); + let mut vm = Vm::new(l1_batch_env, system_env, storage); + harness.execute_on_vm(&mut vm); + (vm, harness) +} + +#[test] +fn sanity_check_harness() { + sanity_check_vm::(); +} + +#[test] +fn sanity_check_harness_on_new_vm() { + sanity_check_vm::>(); +} + +#[test] +fn sanity_check_shadow_vm() { + let system_env = default_system_env(); + let l1_batch_env = default_l1_batch(L1BatchNumber(1)); + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + let mut harness = Harness::new(&l1_batch_env); + harness.setup_storage(&mut storage); + + // We need separate storage views since they are mutated by the VM execution + let main_storage = StorageView::new(&storage).to_rc_ptr(); + let shadow_storage = StorageView::new(&storage).to_rc_ptr(); + let mut vm = ShadowVm::<_, ReferenceVm<_>, ReferenceVm<_>>::with_custom_shadow( + l1_batch_env, + system_env, + main_storage, + shadow_storage, + ); + harness.execute_on_vm(&mut vm); +} + +#[test] +fn shadow_vm_basics() { + let (vm, harness) = sanity_check_vm::(); + let mut dump = vm.dump_state(); + Harness::assert_dump(&mut dump); + + // Test standard playback functionality. + let replayed_dump = dump.clone().play_back::>().dump_state(); + pretty_assertions::assert_eq!(replayed_dump, dump); + + // Check that the VM executes identically when reading from the original storage and one restored from the dump. + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + harness.setup_storage(&mut storage); + let storage = StorageView::new(storage).to_rc_ptr(); + + let vm = dump + .clone() + .play_back_custom(|l1_batch_env, system_env, dump_storage| { + ShadowVm::<_, ReferenceVm, ReferenceVm<_>>::with_custom_shadow( + l1_batch_env, + system_env, + storage, + dump_storage, + ) + }); + let new_dump = vm.dump_state(); + pretty_assertions::assert_eq!(new_dump, dump); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index a96045141380..dd407c616682 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -13,11 +13,12 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use super::{ - tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + tester::{get_empty_storage, VmTesterBuilder}, utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::default_l1_batch, vm_latest::constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 5e7b7748fb3a..156af43dcf24 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -6,6 +6,7 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::{ 
circuits_tracer::CircuitsTracer, tests::{ @@ -41,10 +42,9 @@ fn test_code_oracle() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -134,10 +134,9 @@ fn test_code_oracle_big_bytecode() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -198,10 +197,9 @@ fn refunds_in_code_oracle() { let mut vm = VmTesterBuilder::new() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode.clone(), precompiles_contract_address, - false, )]) .with_storage(storage.clone()) .build(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 746e9be923f2..b8942dcbb6a8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -14,6 +14,7 @@ use crate::{ storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::{ tests::{ tester::{TxType, VmTester, VmTesterBuilder}, @@ -123,7 +124,10 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) .build(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index a43bb7c0309e..fde94d9da6cd 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -18,10 +18,8 @@ use crate::{ storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, - vm_fast::{ - tests::tester::{default_l1_batch, VmTesterBuilder}, - vm::Vm, - }, + versions::testonly::default_l1_batch, + vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, vm_latest::{ constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, utils::l2_blocks::get_l2_block_hash_key, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 2ae43869d7f6..f72e95da9f87 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -5,6 +5,7 @@ use crate::{ ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, VmRevertReason, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTesterBuilder}, utils::read_nonce_holder_tester, @@ -41,10 +42,9 @@ fn test_nonce_holder() { 
.with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::account( read_nonce_holder_tester().to_vec(), account.address, - true, )]) .with_rich_accounts(vec![account.clone()]) .build(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index 28d3ea82da31..5bc3f614d61b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -4,6 +4,7 @@ use zksync_types::{Address, Execute}; use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + versions::testonly::ContractToDeploy, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -18,7 +19,7 @@ fn test_keccak() { .with_deployer() .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doKeccak(1000)`. @@ -55,7 +56,7 @@ fn test_sha256() { .with_deployer() .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doSha256(1000)`. diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 1d276533898e..1856995149aa 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -3,6 +3,7 @@ use zksync_types::{Address, Execute, U256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::{read_expensive_contract, read_test_contract}, @@ -172,10 +173,9 @@ fn negative_pubdata_for_transaction() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( expensive_contract_bytecode, expensive_contract_address, - false, )]) .build(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index bc0a07381b00..1fd2ebd523b0 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -12,6 +12,7 @@ use crate::{ interface::{ storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTester, VmTesterBuilder}, utils::read_many_owners_custom_account_contract, @@ -50,7 +51,10 @@ async fn test_require_eip712() { let (bytecode, contract) = read_many_owners_custom_account_contract(); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account( + bytecode, + account_abstraction.address, + )]) 
.with_execution_mode(TxExecutionMode::VerifyExecute) .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) .build(); diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 8258e21366ce..2cfadb640e72 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -6,6 +6,7 @@ use crate::{ interface::{ TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + versions::testonly::ContractToDeploy, vm_fast::tests::tester::VmTesterBuilder, }; @@ -23,7 +24,7 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) .build(); let account = &mut vm.rich_accounts[0]; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs index 781069ddf499..212e569d5107 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs @@ -1,5 +1,5 @@ pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, get_empty_storage, VmTester, VmTesterBuilder}; +pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; mod transaction_test_info; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs index 8071bcf51d4a..dd82b73839b7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -3,13 +3,8 @@ use std::{cell::RefCell, rc::Rc}; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; use zksync_types::{ - block::L2BlockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, U256, + block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, + L2BlockNumber, Nonce, StorageKey, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use zksync_vm2::WorldDiff; @@ -20,8 +15,11 @@ use crate::{ L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, - versions::vm_fast::{tests::utils::read_test_contract, vm::Vm}, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block}, + versions::{ + testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, + vm_fast::{tests::utils::read_test_contract, vm::Vm}, + }, + vm_latest::utils::l2_blocks::load_last_l2_block, }; pub(crate) struct VmTester { @@ -31,7 +29,7 @@ pub(crate) struct VmTester { pub(crate) test_contract: Option
, pub(crate) fee_account: Address, pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, + pub(crate) custom_contracts: Vec, } impl VmTester { @@ -63,10 +61,10 @@ impl VmTester { pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { for account in self.rich_accounts.iter_mut() { account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); + make_account_rich(&mut self.storage.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); + make_account_rich(&mut self.storage.borrow_mut(), deployer); } if !self.custom_contracts.is_empty() { @@ -99,7 +97,7 @@ impl VmTester { }; } - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), storage); + let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); if self.test_contract.is_some() { self.deploy_test_contract(); @@ -108,15 +106,13 @@ impl VmTester { } } -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - pub(crate) struct VmTesterBuilder { storage: Option, l1_batch_env: Option, system_env: SystemEnv, deployer: Option, rich_accounts: Vec, - custom_contracts: Vec, + custom_contracts: Vec, } impl Clone for VmTesterBuilder { @@ -132,21 +128,12 @@ impl Clone for VmTesterBuilder { } } -#[allow(dead_code)] impl VmTesterBuilder { pub(crate) fn new() -> Self { Self { storage: None, l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, + system_env: default_system_env(), deployer: None, rich_accounts: vec![], custom_contracts: vec![], @@ -158,11 +145,6 @@ impl VmTesterBuilder { self } - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { self.storage = Some(storage); self @@ -210,7 +192,7 @@ impl VmTesterBuilder { self } - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { + pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { self.custom_contracts = contracts; self } @@ -221,17 +203,17 @@ impl VmTesterBuilder { .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); + ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); let storage_ptr = Rc::new(RefCell::new(raw_storage)); for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); + make_account_rich(&mut storage_ptr.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); + make_account_rich(&mut storage_ptr.borrow_mut(), deployer); } let fee_account = l1_batch_env.fee_account; - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); + let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); VmTester { vm, @@ -245,53 +227,6 @@ impl VmTesterBuilder { } } -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 
gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - pub(crate) fn get_empty_storage() -> InMemoryStorage { InMemoryStorage::with_system_contracts(hash_bytecode) } - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. -fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index efa64ea17708..89f0fa236206 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -2,6 +2,7 @@ use zksync_types::{Execute, H160}; use crate::{ interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, @@ -14,7 +15,10 @@ fn test_tracing_of_execution_errors() { let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + read_error_contract(), + contract_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 662e014ef85b..ef510546f11c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -6,6 +6,7 @@ use zksync_utils::u256_to_h256; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{get_empty_storage, VmTesterBuilder}, utils::get_balance, @@ -21,7 +22,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let recipeint_bytecode = read_bytecode( + let recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", ); let test_abi = load_contract( @@ -62,8 +63,8 @@ fn test_send_or_transfer(test_option: TestOptions) { .with_deployer() 
.with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(recipient_bytecode, recipient_address), ]) .build(); @@ -110,7 +111,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let reentrant_recipeint_bytecode = read_bytecode( + let reentrant_recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", ); let test_abi = load_contract( @@ -121,7 +122,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { ); let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); + let reentrant_recipient_address = Address::random(); let (value, calldata) = match test_option { TestOptions::Send(value) => ( @@ -130,7 +131,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("send") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -141,7 +142,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("transfer") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -154,12 +155,8 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .with_deployer() .with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), ]) .build(); @@ -167,7 +164,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: Some(reentrant_recipeint_address), + contract_address: Some(reentrant_recipient_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 66ee04c73fd9..0b6172a4d8a7 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -11,7 +11,7 @@ use zksync_types::{ BYTES_PER_ENUMERATION_INDEX, }, AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, - BOOTLOADER_ADDRESS, H160, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; @@ -31,11 +31,12 @@ use super::{ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, - CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, - Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, - VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, 
VmRevertReason, + storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, + TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, + VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, + VmRevertReason, VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ @@ -68,6 +69,65 @@ pub struct Vm { } impl Vm { + pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + let default_aa_code_hash = system_env + .base_system_smart_contracts + .default_aa + .hash + .into(); + + let program_cache = HashMap::from([World::convert_system_contract_code( + &system_env.base_system_smart_contracts.default_aa, + false, + )]); + + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); + let bootloader_memory = bootloader_initial_memory(&batch_env); + + let mut inner = VirtualMachine::new( + BOOTLOADER_ADDRESS, + bootloader, + H160::zero(), + &[], + system_env.bootloader_gas_limit, + Settings { + default_aa_code_hash, + // this will change after 1.5 + evm_interpreter_code_hash: default_aa_code_hash, + hook_address: get_vm_hook_position(VM_VERSION) * 32, + }, + ); + + inner.current_frame().set_stack_pointer(0); + // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. + inner.current_frame().set_heap_bound(u32::MAX); + inner.current_frame().set_aux_heap_bound(u32::MAX); + inner + .current_frame() + .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); + + let mut this = Self { + world: World::new(storage, program_cache), + inner, + gas_for_account_validation: system_env.default_validation_computational_gas_limit, + bootloader_state: BootloaderState::new( + system_env.execution_mode, + bootloader_memory.clone(), + batch_env.first_l2_block, + ), + system_env, + batch_env, + snapshot: None, + #[cfg(test)] + enforced_state_diffs: None, + }; + this.write_to_bootloader_heap(bootloader_memory); + this + } + fn run( &mut self, execution_mode: VmExecutionMode, @@ -393,69 +453,6 @@ impl Vm { pub(super) fn gas_remaining(&mut self) -> u32 { self.inner.current_frame().gas() } -} - -// We don't implement `VmFactory` trait because, unlike old VMs, the new VM doesn't require storage to be writable; -// it maintains its own storage cache and a write buffer. 
-impl Vm { - pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { - let default_aa_code_hash = system_env - .base_system_smart_contracts - .default_aa - .hash - .into(); - - let program_cache = HashMap::from([World::convert_system_contract_code( - &system_env.base_system_smart_contracts.default_aa, - false, - )]); - - let (_, bootloader) = World::convert_system_contract_code( - &system_env.base_system_smart_contracts.bootloader, - true, - ); - let bootloader_memory = bootloader_initial_memory(&batch_env); - - let mut inner = VirtualMachine::new( - BOOTLOADER_ADDRESS, - bootloader, - H160::zero(), - &[], - system_env.bootloader_gas_limit, - Settings { - default_aa_code_hash, - // this will change after 1.5 - evm_interpreter_code_hash: default_aa_code_hash, - hook_address: get_vm_hook_position(VM_VERSION) * 32, - }, - ); - - inner.current_frame().set_stack_pointer(0); - // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. - inner.current_frame().set_heap_bound(u32::MAX); - inner.current_frame().set_aux_heap_bound(u32::MAX); - inner - .current_frame() - .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); - - let mut this = Self { - world: World::new(storage, program_cache), - inner, - gas_for_account_validation: system_env.default_validation_computational_gas_limit, - bootloader_state: BootloaderState::new( - system_env.execution_mode, - bootloader_memory.clone(), - batch_env.first_l2_block, - ), - system_env, - batch_env, - snapshot: None, - #[cfg(test)] - enforced_state_diffs: None, - }; - this.write_to_bootloader_heap(bootloader_memory); - this - } // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { @@ -488,6 +485,17 @@ impl Vm { } } +impl VmFactory> for Vm> { + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + impl VmInterface for Vm { type TracerDispatcher = (); @@ -673,6 +681,12 @@ impl VmInterfaceHistoryEnabled for Vm { } } +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec { + self.decommitted_hashes().map(u256_to_h256).collect() + } +} + impl fmt::Debug for Vm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vm") diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index a445a1d51402..71f7a6352129 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -2,8 +2,9 @@ use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_querie use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, - Transaction, + Transaction, H256, }; +use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, @@ -12,7 +13,7 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + VmMemoryMetrics, VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ @@ -238,3 +239,12 @@ impl VmInterfaceHistoryEnabled for Vm { self.snapshots.pop(); } } + +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec { + self.get_used_contracts() + .into_iter() + .map(u256_to_h256) + .collect() + } +} 
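The constructor split above is worth making concrete: `Vm::custom` accepts any `ReadStorage` directly, while the new `VmFactory` impl keeps the fast VM usable behind generic factory code such as `ShadowVm` and `VmInstance`. A minimal sketch of both entry points, assuming `vm_fast` and `InMemoryStorage` are re-exported as in the current crate layout and with the env values supplied by the caller:

```rust
use zksync_multivm::{
    interface::{
        storage::{ImmutableStorageView, InMemoryStorage, StorageView},
        L1BatchEnv, SystemEnv, VmFactory,
    },
    vm_fast,
};

fn build_fast_vms(l1_batch_env: L1BatchEnv, system_env: SystemEnv, storage: InMemoryStorage) {
    let storage_view = StorageView::new(storage).to_rc_ptr();

    // 1. Direct construction over any `ReadStorage`: the fast VM maintains its
    // own storage cache and write buffer, so the storage needn't be writable.
    let _direct = vm_fast::Vm::custom(
        l1_batch_env.clone(),
        system_env.clone(),
        ImmutableStorageView::new(storage_view.clone()),
    );

    // 2. Via the `VmFactory` impl added in this PR, which wraps the storage
    // view in `ImmutableStorageView` internally.
    let _via_factory = vm_fast::Vm::new(l1_batch_env, system_env, storage_view);
}
```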
diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index cedb4bc8276d..5e254b09b30f 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -4,15 +4,19 @@ use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, + utils::ShadowVm, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, tracers::TracerDispatcher, - versions::shadow::ShadowVm, }; -pub type ShadowedFastVm = ShadowVm, H>>; +pub(crate) type ShadowedVmFast = ShadowVm< + S, + crate::vm_latest::Vm, H>, + crate::vm_fast::Vm>, +>; #[derive(Debug)] pub enum VmInstance { @@ -26,7 +30,7 @@ pub enum VmInstance { Vm1_4_2(crate::vm_1_4_2::Vm, H>), Vm1_5_0(crate::vm_latest::Vm, H>), VmFast(crate::vm_fast::Vm>), - ShadowedVmFast(ShadowedFastVm), + ShadowedVmFast(ShadowedVmFast), } macro_rules! dispatch_vm { @@ -222,10 +226,15 @@ impl VmInstance { FastVmMode::Old => Self::new(l1_batch_env, system_env, storage_view), FastVmMode::New => { let storage = ImmutableStorageView::new(storage_view); - Self::VmFast(crate::vm_fast::Vm::new(l1_batch_env, system_env, storage)) + Self::VmFast(crate::vm_fast::Vm::custom( + l1_batch_env, + system_env, + storage, + )) } FastVmMode::Shadow => { - Self::ShadowedVmFast(ShadowVm::new(l1_batch_env, system_env, storage_view)) + let vm = ShadowVm::new(l1_batch_env, system_env, storage_view); + Self::ShadowedVmFast(vm) } }, _ => Self::new(l1_batch_env, system_env, storage_view), diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index decba534d23e..308cd65427fb 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -43,6 +43,7 @@ impl FileBackedObjectStore { Bucket::ProofsFri, Bucket::StorageSnapshot, Bucket::TeeVerifierInput, + Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); fs::create_dir_all(&bucket_path).await?; diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs index f5bb3706d9d4..ff5fae2a81f0 100644 --- a/core/lib/object_store/src/objects.rs +++ b/core/lib/object_store/src/objects.rs @@ -95,7 +95,6 @@ where type Key<'a> = SnapshotStorageLogsStorageKey; fn encode_key(key: Self::Key<'_>) -> String { - // FIXME: should keys be separated by version? 
format!( "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.proto.gzip", key.l1_batch_number, key.chunk_id diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 3c5a89f160a5..740e8d76e246 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -20,6 +20,7 @@ pub enum Bucket { StorageSnapshot, DataAvailability, TeeVerifierInput, + VmDumps, } impl Bucket { @@ -39,6 +40,7 @@ impl Bucket { Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", Self::TeeVerifierInput => "tee_verifier_inputs", + Self::VmDumps => "vm_dumps", } } } diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index d6f7555b7672..62bab29fea82 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -7,6 +7,7 @@ use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, storage::{ReadStorage, StorageView, StorageViewStats}, + utils::DivergenceHandler, BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, }, @@ -36,6 +37,7 @@ pub struct MainBatchExecutorFactory { optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, observe_storage_metrics: bool, + divergence_handler: Option, } impl MainBatchExecutorFactory { @@ -45,6 +47,7 @@ impl MainBatchExecutorFactory { optional_bytecode_compression, fast_vm_mode: FastVmMode::Old, observe_storage_metrics: false, + divergence_handler: None, } } @@ -64,6 +67,11 @@ impl MainBatchExecutorFactory { pub fn observe_storage_metrics(&mut self) { self.observe_storage_metrics = true; } + + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + tracing::info!("Set VM divergence handler"); + self.divergence_handler = Some(handler); + } } impl BatchExecutorFactory for MainBatchExecutorFactory { @@ -81,6 +89,7 @@ impl BatchExecutorFactory for MainBatchExecu optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, observe_storage_metrics: self.observe_storage_metrics, + divergence_handler: self.divergence_handler.clone(), commands: commands_receiver, _storage: PhantomData, }; @@ -103,6 +112,7 @@ struct CommandReceiver { optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, observe_storage_metrics: bool, + divergence_handler: Option, commands: mpsc::Receiver, _storage: PhantomData, } @@ -126,6 +136,12 @@ impl CommandReceiver { let mut batch_finished = false; let mut prev_storage_stats = StorageViewStats::default(); + if let VmInstance::ShadowedVmFast(vm) = &mut vm { + if let Some(handler) = self.divergence_handler.take() { + vm.set_divergence_handler(handler); + } + } + while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml index 694576dca3b0..8bff19ddc475 100644 --- a/core/lib/vm_interface/Cargo.toml +++ b/core/lib/vm_interface/Cargo.toml @@ -18,6 +18,7 @@ zksync_types.workspace = true anyhow.workspace = true async-trait.workspace = true hex.workspace = true +pretty_assertions.workspace = true serde.workspace = true thiserror.workspace = true tracing.workspace = true diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 2b30f82e0ce5..645e3e7c856e 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -37,10 +37,11 @@ pub use 
crate::{ }, tracer, }, - vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled}, + vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts}, }; pub mod executor; pub mod storage; mod types; +pub mod utils; mod vm; diff --git a/core/lib/vm_interface/src/storage/snapshot.rs b/core/lib/vm_interface/src/storage/snapshot.rs index a0175ff478a3..78b57a31f13e 100644 --- a/core/lib/vm_interface/src/storage/snapshot.rs +++ b/core/lib/vm_interface/src/storage/snapshot.rs @@ -12,7 +12,7 @@ use super::ReadStorage; /// In contrast, `StorageSnapshot` cannot be modified once created and is intended to represent a complete or almost complete snapshot /// for a particular VM execution. It can serve as a preloaded cache for a certain [`ReadStorage`] implementation /// that significantly reduces the number of storage accesses. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StorageSnapshot { // `Option` encompasses entire map value for more efficient serialization storage: HashMap>, @@ -60,6 +60,36 @@ impl StorageSnapshot { } } +/// When used as a storage, a snapshot is assumed to be *complete*; [`ReadStorage`] methods will panic when called +/// with storage slots not present in the snapshot. +impl ReadStorage for StorageSnapshot { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let entry = self + .storage + .get(&key.hashed_key()) + .unwrap_or_else(|| panic!("attempted to read from unknown storage slot: {key:?}")); + entry.unwrap_or_default().0 + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to check initialness for unknown storage slot: {key:?}") + }); + entry.is_none() + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.factory_deps.get(&hash).map(|bytes| bytes.0.clone()) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to get enum index for unknown storage slot: {key:?}") + }); + entry.map(|(_, idx)| idx) + } +} + /// [`StorageSnapshot`] wrapper implementing [`ReadStorage`] trait. Created using [`with_fallback()`](StorageSnapshot::with_fallback()). /// /// # Why fallback? 
diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs new file mode 100644 index 000000000000..f7dce38ee899 --- /dev/null +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -0,0 +1,249 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; + +use crate::{ + storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + VmMemoryMetrics, VmTrackingContracts, +}; + +fn create_storage_snapshot( + storage: &StoragePtr>, + used_contract_hashes: Vec, +) -> StorageSnapshot { + let mut storage = storage.borrow_mut(); + let storage_cache = storage.cache(); + let mut storage_slots: HashMap<_, _> = storage_cache + .read_storage_keys() + .into_iter() + .map(|(key, value)| { + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (value, idx)); + (key.hashed_key(), value_and_index) + }) + .collect(); + + // Normally, all writes are internally read in order to calculate their gas costs, so the code below + // is defensive programming. + for (key, _) in storage_cache.initial_writes() { + let hashed_key = key.hashed_key(); + if storage_slots.contains_key(&hashed_key) { + continue; + } + + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (storage.read_value(&key), idx)); + storage_slots.insert(hashed_key, value_and_index); + } + + let factory_deps = used_contract_hashes + .into_iter() + .filter_map(|hash| Some((hash, storage.load_factory_dep(hash)?))) + .collect(); + + StorageSnapshot::new(storage_slots, factory_deps) +} + +/// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VmDump { + pub l1_batch_env: L1BatchEnv, + pub system_env: SystemEnv, + pub l2_blocks: Vec, + pub storage: StorageSnapshot, +} + +impl VmDump { + pub fn l1_batch_number(&self) -> L1BatchNumber { + self.l1_batch_env.number + } + + /// Plays back this dump on the specified VM. + pub fn play_back>>(self) -> Vm { + self.play_back_custom(Vm::new) + } + + /// Plays back this dump on a VM created using the provided closure. + #[doc(hidden)] // too low-level + pub fn play_back_custom( + self, + create_vm: impl FnOnce(L1BatchEnv, SystemEnv, StoragePtr>) -> Vm, + ) -> Vm { + let storage = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = create_vm(self.l1_batch_env, self.system_env, storage); + + for (i, l2_block) in self.l2_blocks.into_iter().enumerate() { + if i > 0 { + // First block is already set. + vm.start_new_l2_block(L2BlockEnv { + number: l2_block.number.0, + timestamp: l2_block.timestamp, + prev_block_hash: l2_block.prev_block_hash, + max_virtual_blocks_to_create: l2_block.virtual_blocks, + }); + } + + for tx in l2_block.txs { + let tx_hash = tx.hash(); + let (compression_result, _) = + vm.execute_transaction_with_bytecode_compression(tx, true); + if let Err(err) = compression_result { + panic!("Failed compressing bytecodes for transaction {tx_hash:?}: {err}"); + } + } + } + vm.finish_batch(); + vm + } +} + +#[derive(Debug, Clone, Copy)] +struct L2BlocksSnapshot { + block_count: usize, + tx_count_in_last_block: usize, +} + +/// VM wrapper that can create [`VmDump`]s during execution. 
+#[derive(Debug)] +pub(super) struct DumpingVm { + storage: StoragePtr>, + inner: Vm, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + l2_blocks: Vec, + l2_blocks_snapshot: Option, +} + +impl DumpingVm { + fn last_block_mut(&mut self) -> &mut L2BlockExecutionData { + self.l2_blocks.last_mut().unwrap() + } + + fn record_transaction(&mut self, tx: Transaction) { + self.last_block_mut().txs.push(tx); + } + + pub fn dump_state(&self) -> VmDump { + VmDump { + l1_batch_env: self.l1_batch_env.clone(), + system_env: self.system_env.clone(), + l2_blocks: self.l2_blocks.clone(), + storage: create_storage_snapshot(&self.storage, self.inner.used_contract_hashes()), + } + } +} + +impl VmInterface for DumpingVm { + type TracerDispatcher = Vm::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + self.record_transaction(tx.clone()); + self.inner.push_transaction(tx); + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inner.inspect(dispatcher, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.l2_blocks.push(L2BlockExecutionData { + number: L2BlockNumber(l2_block_env.number), + timestamp: l2_block_env.timestamp, + prev_block_hash: l2_block_env.prev_block_hash, + virtual_blocks: l2_block_env.max_virtual_blocks_to_create, + txs: vec![], + }); + self.inner.start_new_l2_block(l2_block_env); + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + self.record_transaction(tx.clone()); + self.inner + .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.inner.record_vm_memory_metrics() + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + self.inner.finish_batch() + } +} + +impl VmInterfaceHistoryEnabled for DumpingVm +where + S: ReadStorage, + Vm: VmInterfaceHistoryEnabled + VmTrackingContracts, +{ + fn make_snapshot(&mut self) { + self.l2_blocks_snapshot = Some(L2BlocksSnapshot { + block_count: self.l2_blocks.len(), + tx_count_in_last_block: self.last_block_mut().txs.len(), + }); + self.inner.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + self.inner.rollback_to_the_latest_snapshot(); + let snapshot = self + .l2_blocks_snapshot + .take() + .expect("rollback w/o snapshot"); + self.l2_blocks.truncate(snapshot.block_count); + assert_eq!( + self.l2_blocks.len(), + snapshot.block_count, + "L2 blocks were removed after creating a snapshot" + ); + self.last_block_mut() + .txs + .truncate(snapshot.tx_count_in_last_block); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot_no_rollback(); + self.l2_blocks_snapshot = None; + } +} + +impl VmFactory> for DumpingVm +where + S: ReadStorage, + Vm: VmFactory> + VmTrackingContracts, +{ + fn new( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let inner = Vm::new(l1_batch_env.clone(), system_env.clone(), storage.clone()); + let first_block = L2BlockExecutionData { + number: L2BlockNumber(l1_batch_env.first_l2_block.number), + timestamp: l1_batch_env.first_l2_block.timestamp, + prev_block_hash: l1_batch_env.first_l2_block.prev_block_hash, + virtual_blocks: l1_batch_env.first_l2_block.max_virtual_blocks_to_create, + txs: vec![], + }; + Self { + l1_batch_env, + system_env, + 
l2_blocks: vec![first_block], + l2_blocks_snapshot: None, + storage, + inner, + } + } +} diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs new file mode 100644 index 000000000000..80a51c7b144f --- /dev/null +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -0,0 +1,9 @@ +//! Miscellaneous VM utils. + +pub use self::{ + dump::VmDump, + shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, +}; + +mod dump; +mod shadow; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs new file mode 100644 index 000000000000..7dfe31f6b686 --- /dev/null +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -0,0 +1,475 @@ +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + fmt, + sync::Arc, +}; + +use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; + +use super::dump::{DumpingVm, VmDump}; +use crate::{ + storage::{ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, +}; + +/// Handler for VM divergences. +#[derive(Clone)] +pub struct DivergenceHandler(Arc); + +impl fmt::Debug for DivergenceHandler { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_tuple("DivergenceHandler") + .field(&"_") + .finish() + } +} + +/// Default handler that panics. +impl Default for DivergenceHandler { + fn default() -> Self { + Self(Arc::new(|err, _| { + // There's no easy way to output the VM dump; it's too large to be logged. + panic!("{err}"); + })) + } +} + +impl DivergenceHandler { + /// Creates a new handler from the provided closure. + pub fn new(f: impl Fn(DivergenceErrors, VmDump) + Send + Sync + 'static) -> Self { + Self(Arc::new(f)) + } + + fn handle(&self, err: DivergenceErrors, dump: VmDump) { + self.0(err, dump); + } +} + +#[derive(Debug)] +struct VmWithReporting { + vm: Shadow, + divergence_handler: DivergenceHandler, +} + +impl VmWithReporting { + fn report(self, err: DivergenceErrors, dump: VmDump) { + tracing::error!("{err}"); + self.divergence_handler.handle(err, dump); + tracing::warn!( + "New VM is dropped; following VM actions will be executed only on the main VM" + ); + } +} + +/// Shadowed VM that executes 2 VMs for each operation and compares their outputs. +/// +/// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), +/// after which the VM drops the shadowed VM (since it's assumed that its state can contain arbitrary garbage at this point). +#[derive(Debug)] +pub struct ShadowVm { + main: DumpingVm, + shadow: RefCell>>, +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + /// Sets the divergence handler to be used by this VM. + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.divergence_handler = handler; + } + } + + /// Mutable ref is not necessary, but it automatically drops potential borrows. + fn report(&mut self, err: DivergenceErrors) { + self.report_shared(err); + } + + /// The caller is responsible for dropping any `shadow` borrows beforehand. + fn report_shared(&self, err: DivergenceErrors) { + self.shadow + .take() + .unwrap() + .report(err, self.main.dump_state()); + } + + /// Dumps the current VM state. 
+ pub fn dump_state(&self) -> VmDump { + self.main.dump_state() + } +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmInterface, +{ + /// Creates a VM with a custom shadow storage. + pub fn with_custom_shadow( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + shadow_storage: StoragePtr, + ) -> Self + where + Shadow: VmFactory, + { + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); + let shadow = VmWithReporting { + vm: shadow, + divergence_handler: DivergenceHandler::default(), + }; + Self { + main, + shadow: RefCell::new(Some(shadow)), + } + } +} + +impl VmFactory> for ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmFactory>, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + Self::with_custom_shadow(batch_env, system_env, storage.clone(), storage) + } +} + +/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! +impl VmInterface for ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + type TracerDispatcher =
::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.push_transaction(tx.clone()); + } + self.main.push_transaction(tx); + } + + fn inspect( + &mut self, + dispatcher: Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + let main_result = self.main.inspect(dispatcher, execution_mode); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_result = shadow + .vm + .inspect(Shadow::TracerDispatcher::default(), execution_mode); + let mut errors = DivergenceErrors::new(); + errors.check_results_match(&main_result, &shadow_result); + + if let Err(err) = errors.into_result() { + let ctx = format!("executing VM with mode {execution_mode:?}"); + self.report(err.context(ctx)); + } + } + main_result + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.main.start_new_l2_block(l2_block_env); + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.start_new_l2_block(l2_block_env); + } + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { + let tx_hash = tx.hash(); + let (main_bytecodes_result, main_tx_result) = self + .main + .inspect_transaction_with_bytecode_compression(tracer, tx.clone(), with_compression); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM. + // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. + let main_bytecodes_result = + main_bytecodes_result.map(|bytecodes| bytecodes.into_owned().into()); + + if let Some(shadow) = self.shadow.get_mut() { + let shadow_result = shadow.vm.inspect_transaction_with_bytecode_compression( + Shadow::TracerDispatcher::default(), + tx, + with_compression, + ); + let mut errors = DivergenceErrors::new(); + errors.check_results_match(&main_tx_result, &shadow_result.1); + if let Err(err) = errors.into_result() { + let ctx = format!( + "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + ); + self.report(err.context(ctx)); + } + } + (main_bytecodes_result, main_tx_result) + } + + fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + self.main.record_vm_memory_metrics() + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_batch = shadow.vm.finish_batch(); + let mut errors = DivergenceErrors::new(); + errors.check_results_match( + &main_batch.block_tip_execution_result, + &shadow_batch.block_tip_execution_result, + ); + errors.check_final_states_match( + &main_batch.final_execution_state, + &shadow_batch.final_execution_state, + ); + errors.check_match( + "final_bootloader_memory", + &main_batch.final_bootloader_memory, + &shadow_batch.final_bootloader_memory, + ); + errors.check_match( + "pubdata_input", + &main_batch.pubdata_input, + &shadow_batch.pubdata_input, + ); + errors.check_match( + "state_diffs", + &main_batch.state_diffs, + &shadow_batch.state_diffs, + ); + + if let Err(err) = errors.into_result() { + self.report(err); + } + } + main_batch + } +} + +#[derive(Debug)] +pub struct DivergenceErrors { + divergences: Vec, + context: Option, +} + +impl fmt::Display for DivergenceErrors { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + if 
let Some(context) = &self.context { + write!( + formatter, + "VM execution diverged: {context}: [{}]", + self.divergences.join(", ") + ) + } else { + write!( + formatter, + "VM execution diverged: [{}]", + self.divergences.join(", ") + ) + } + } +} + +impl DivergenceErrors { + fn new() -> Self { + Self { + divergences: vec![], + context: None, + } + } + + fn context(mut self, context: String) -> Self { + self.context = Some(context); + self + } + + fn check_results_match( + &mut self, + main_result: &VmExecutionResultAndLogs, + shadow_result: &VmExecutionResultAndLogs, + ) { + self.check_match("result", &main_result.result, &shadow_result.result); + self.check_match( + "logs.events", + &main_result.logs.events, + &shadow_result.logs.events, + ); + self.check_match( + "logs.system_l2_to_l1_logs", + &main_result.logs.system_l2_to_l1_logs, + &shadow_result.logs.system_l2_to_l1_logs, + ); + self.check_match( + "logs.user_l2_to_l1_logs", + &main_result.logs.user_l2_to_l1_logs, + &shadow_result.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); + self.check_match("logs.storage_logs", &main_logs, &shadow_logs); + self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + self.check_match( + "statistics.circuit_statistic", + &main_result.statistics.circuit_statistic, + &shadow_result.statistics.circuit_statistic, + ); + self.check_match( + "gas_remaining", + &main_result.statistics.gas_remaining, + &shadow_result.statistics.gas_remaining, + ); + } + + fn check_match(&mut self, context: &str, main: &T, shadow: &T) { + if main != shadow { + let comparison = pretty_assertions::Comparison::new(main, shadow); + let err = format!("`{context}` mismatch: {comparison}"); + self.divergences.push(err); + } + } + + fn check_final_states_match( + &mut self, + main: &CurrentExecutionState, + shadow: &CurrentExecutionState, + ) { + self.check_match("final_state.events", &main.events, &shadow.events); + self.check_match( + "final_state.user_l2_to_l1_logs", + &main.user_l2_to_l1_logs, + &shadow.user_l2_to_l1_logs, + ); + self.check_match( + "final_state.system_logs", + &main.system_logs, + &shadow.system_logs, + ); + self.check_match( + "final_state.storage_refunds", + &main.storage_refunds, + &shadow.storage_refunds, + ); + self.check_match( + "final_state.pubdata_costs", + &main.pubdata_costs, + &shadow.pubdata_costs, + ); + self.check_match( + "final_state.used_contract_hashes", + &main.used_contract_hashes.iter().collect::>(), + &shadow.used_contract_hashes.iter().collect::>(), + ); + + let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); + let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); + self.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + } + + fn gather_logs(logs: &[StorageLog]) -> BTreeMap { + logs.iter() + .filter(|log| log.is_write()) + .map(|log| (log.key, log)) + .collect() + } + + fn into_result(self) -> Result<(), Self> { + if self.divergences.is_empty() { + Ok(()) + } else { + Err(self) + } + } +} + +// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them +// inside the VM, hence this auxiliary struct. 
+#[derive(PartialEq)] +struct UniqueStorageLogs(BTreeMap); + +impl fmt::Debug for UniqueStorageLogs { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + let mut map = formatter.debug_map(); + for log in self.0.values() { + map.entry( + &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), + &format!("{:?} -> {:?}", log.previous_value, log.log.value), + ); + } + map.finish() + } +} + +impl UniqueStorageLogs { + fn new(logs: &[StorageLogWithPreviousValue]) -> Self { + let mut unique_logs = BTreeMap::::new(); + for log in logs { + if !log.log.is_write() { + continue; + } + if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { + existing_log.log.value = log.log.value; + } else { + unique_logs.insert(log.log.key, *log); + } + } + + // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. + unique_logs.retain(|_, log| log.previous_value != log.log.value); + Self(unique_logs) + } +} + +impl VmInterfaceHistoryEnabled for ShadowVm +where + S: ReadStorage, + Main: VmInterfaceHistoryEnabled + VmTrackingContracts, + Shadow: VmInterfaceHistoryEnabled, +{ + fn make_snapshot(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.make_snapshot(); + } + self.main.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.rollback_to_the_latest_snapshot(); + } + self.main.rollback_to_the_latest_snapshot(); + } + + fn pop_snapshot_no_rollback(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.pop_snapshot_no_rollback(); + } + self.main.pop_snapshot_no_rollback(); + } +} diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index f70be52bd86a..a380f0659e67 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,7 +11,7 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. -use zksync_types::Transaction; +use zksync_types::{Transaction, H256}; use crate::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, @@ -103,3 +103,9 @@ pub trait VmInterfaceHistoryEnabled: VmInterface { /// (i.e., the VM must not panic in this case). fn pop_snapshot_no_rollback(&mut self); } + +/// VM that tracks decommitment of bytecodes during execution. This is required to create a [`VmDump`]. +pub trait VmTrackingContracts: VmInterface { + /// Returns hashes of all decommitted bytecodes. + fn used_contract_hashes(&self) -> Vec; +} diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs index ee1be98319b3..e4eb8b38a690 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -13,6 +13,7 @@ use zksync_vm_runner::{ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, + object_store::ObjectStoreResource, pools::{PoolResource, ReplicaPool}, }, StopReceiver, Task, TaskId, WiringError, WiringLayer, @@ -38,6 +39,7 @@ impl VmPlaygroundLayer { pub struct Input { // We use a replica pool because VM playground doesn't write anything to the DB by design. 
pub replica_pool: PoolResource, + pub dumps_object_store: Option, #[context(default)] pub app_health: AppHealthCheckResource, } @@ -65,6 +67,7 @@ impl WiringLayer for VmPlaygroundLayer { async fn wire(self, input: Self::Input) -> Result { let Input { replica_pool, + dumps_object_store, app_health, } = input; @@ -95,6 +98,7 @@ impl WiringLayer for VmPlaygroundLayer { }; let (playground, tasks) = VmPlayground::new( connection_pool, + dumps_object_store.map(|resource| resource.0), self.config.fast_vm_mode, storage, self.zksync_network_id, diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index ceb11a982477..9c235ad6b291 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -24,6 +24,7 @@ zksync_vm_executor.workspace = true zksync_health_check.workspace = true serde.workspace = true +serde_json.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index dc21d5a32036..4bab43d1d0f4 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -1,4 +1,5 @@ use std::{ + hash::{DefaultHasher, Hash, Hasher}, io, num::NonZeroU32, path::{Path, PathBuf}, @@ -14,10 +15,14 @@ use tokio::{ }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_object_store::{Bucket, ObjectStore}; use zksync_state::RocksdbStorage; use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; use zksync_vm_executor::batch::MainBatchExecutorFactory; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; +use zksync_vm_interface::{ + utils::{DivergenceHandler, VmDump}, + L1BatchEnv, L2BlockEnv, SystemEnv, +}; use crate::{ storage::{PostgresLoader, StorageLoader}, @@ -95,6 +100,7 @@ impl VmPlayground { /// Creates a new playground. pub async fn new( pool: ConnectionPool, + dumps_object_store: Option>, vm_mode: FastVmMode, storage: VmPlaygroundStorageOptions, chain_id: L2ChainId, @@ -130,6 +136,22 @@ impl VmPlayground { let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false); batch_executor_factory.set_fast_vm_mode(vm_mode); batch_executor_factory.observe_storage_metrics(); + let handle = tokio::runtime::Handle::current(); + if let Some(store) = dumps_object_store { + tracing::info!("Using object store for VM dumps: {store:?}"); + + let handler = DivergenceHandler::new(move |err, dump| { + let err_message = err.to_string(); + if let Err(err) = handle.block_on(Self::dump_vm_state(&*store, &err_message, &dump)) + { + let l1_batch_number = dump.l1_batch_number(); + tracing::error!( + "Saving VM dump for L1 batch #{l1_batch_number} failed: {err:#}" + ); + } + }); + batch_executor_factory.set_divergence_handler(handler); + } let io = VmPlaygroundIo { cursor_file_path, @@ -176,6 +198,27 @@ impl VmPlayground { )) } + async fn dump_vm_state( + object_store: &dyn ObjectStore, + err_message: &str, + dump: &VmDump, + ) -> anyhow::Result<()> { + // Deduplicate VM dumps by the error hash so that we don't create a lot of dumps for the same error. 
+ let mut hasher = DefaultHasher::new(); + err_message.hash(&mut hasher); + let err_hash = hasher.finish(); + let batch_number = dump.l1_batch_number().0; + let dump_filename = format!("shadow_vm_dump_batch{batch_number:08}_{err_hash:x}.json"); + + tracing::info!("Dumping diverged VM state to `{dump_filename}`"); + let dump = serde_json::to_string(&dump).context("failed serializing VM dump")?; + object_store + .put_raw(Bucket::VmDumps, &dump_filename, dump.into_bytes()) + .await + .context("failed putting VM dump to object store")?; + Ok(()) + } + /// Returns a health check for this component. pub fn health_check(&self) -> ReactiveHealthCheck { self.io.health_updater.subscribe() diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs index aaaf4b45b1a4..92cd149f405f 100644 --- a/core/node/vm_runner/src/tests/playground.rs +++ b/core/node/vm_runner/src/tests/playground.rs @@ -74,6 +74,7 @@ async fn run_playground( let (playground, playground_tasks) = VmPlayground::new( pool.clone(), + None, FastVmMode::Shadow, storage, genesis_params.config().l2_chain_id, @@ -255,6 +256,7 @@ async fn using_larger_window_size(window_size: u32) { }; let (playground, playground_tasks) = VmPlayground::new( pool.clone(), + None, FastVmMode::Shadow, VmPlaygroundStorageOptions::from(&rocksdb_dir), genesis_params.config().l2_chain_id, diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index f3c00667c7dd..55196413de89 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -88,7 +88,7 @@ impl BenchmarkingVmFactory for Fast { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance { - vm_fast::Vm::new(batch_env, system_env, storage) + vm_fast::Vm::custom(batch_env, system_env, storage) } } diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 776e8a56e497..5eb601e3d590 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -65,8 +65,8 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac > [!NOTE] > -> To stop historical DB growth, you can enable DB pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file, -> you can read more about pruning in +> To stop historical DB growth, you can enable DB pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose +> file, you can read more about pruning in > [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md) - 32 GB of RAM and a relatively modern CPU diff --git a/prover/Cargo.lock b/prover/Cargo.lock index b943de65ce5f..38c2ca162c43 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7685,7 +7685,6 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", - "pretty_assertions", "thiserror", "tracing", "vise", @@ -8132,6 +8131,7 @@ dependencies = [ "anyhow", "async-trait", "hex", + "pretty_assertions", "serde", "thiserror", "tracing", From 1cf959da12d2b6369f34a67ccc2575b4b173d75a Mon Sep 17 00:00:00 2001 From: Artem Fomiuk <88630083+Artemka374@users.noreply.github.com> Date: Fri, 20 Sep 2024 14:12:13 +0300 Subject: [PATCH 114/116] feat(prover): Use query macro instead string literals for queries (#2930) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What ❔ In some places our sqlx queries are using string literals instead of query macros. 
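For illustration, the pattern being replaced and its macro-based counterpart look roughly like this (a minimal sketch with made-up table and column names, not one of the exact queries in this PR):

```rust
// Before: the query string is assembled at runtime. It is not checked at
// compile time, cannot be cached as a prepared statement, and interpolating
// values into the string risks SQL injection.
let query = format!("UPDATE jobs SET status = '{status}' WHERE id = {id}");
sqlx::query(&query).execute(&pool).await?;

// After: `sqlx::query!` validates the statement against the database schema
// at compile time, and `$1`/`$2` are bound parameters rather than
// interpolated text, so the prepared statement can be cached and reused.
sqlx::query!("UPDATE jobs SET status = $1 WHERE id = $2", status, id)
    .execute(&pool)
    .await?;
```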
This PR changes this behaviour where possible.

## Why ❔

To prevent possible SQL injections. It will also cache the queries, which should make them faster.

## Checklist

- [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] Code has been formatted via `zk fmt` and `zk lint`.

---
 ...37891598dc79b5ed11258b2c90c3f282929ad.json |  16 +++
 ...0d001cdf5bc7ba988b742571ec90a938434e3.json |  17 +++
 ...a468057599be1e6c6c96a947c33df53a68224.json |  15 +++
 ...2e61157bf58aec70903623afc9da24d46a336.json |  16 +++
 ...1b931c0d8dbc6835dfac20107ea7412ce9fbb.json |  15 +++
 ...9a67936d572f8046d3a1c7a4f100ff209d81d.json |  18 +++
 ...6ea738d209eb07e7a6bcbdb33e25b3347a08c.json |  47 ++++++++
 ...07edd87cb16a79c13e7d4291d99736e51d3b9.json |  15 +++
 ...4d935674a5aff2f34421d753b4d1a9dea5b12.json |  47 ++++++++
 ...700302981be0afef31a8864613484f8521f9e.json |  19 +++
 ...d68c598eada8e60938a8e4b5cd32b53f5a574.json |  47 ++++++++
 .../crates/lib/prover_dal/src/cli_test_dal.rs | 113 ++++++++++--------
 .../src/fri_witness_generator_dal.rs          | 111 +++++++++--------
 13 files changed, 392 insertions(+), 104 deletions(-)
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json

diff --git a/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json b/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json
new file mode 100644
index 000000000000..61518273b4d3
--- /dev/null
+++ b/prover/crates/lib/prover_dal/.sqlx/query-0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad.json
@@ -0,0 +1,16 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET status = $3\n ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Int8",
+        "Int2",
+        "Text"
+      ]
+    },
"nullable": [] + }, + "hash": "0eac6f7b2d799059328584029b437891598dc79b5ed11258b2c90c3f282929ad" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json b/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json new file mode 100644 index 000000000000..4015a22ff3fd --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE leaf_aggregation_witness_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND circuit_id = $4", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1926cf432237684de2383179a6d0d001cdf5bc7ba988b742571ec90a938434e3" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json b/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json new file mode 100644 index 000000000000..5cec4d7d7d03 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (\n l1_batch_number,\n status,\n number_of_final_node_jobs,\n created_at,\n updated_at\n )\n VALUES\n ($1, 'waiting_for_proofs',1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "39f60c638d445c5dbf23e01fd89a468057599be1e6c6c96a947c33df53a68224" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json b/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json new file mode 100644 index 000000000000..063ae8fc90a3 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "3a9ffd4d88f2cfac22835aac2512e61157bf58aec70903623afc9da24d46a336" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json b/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json new file mode 100644 index 000000000000..693905084151 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (\n l1_batch_number,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", + "describe": { + "columns": [], + "parameters": { + 
"Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "3bb8fbd9e83703887e0a3c196031b931c0d8dbc6835dfac20107ea7412ce9fbb" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json b/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json new file mode 100644 index 000000000000..7615523f92f1 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE prover_jobs_fri SET status = $1\n WHERE l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "434f7cb51a7d22948cd26e962679a67936d572f8046d3a1c7a4f100ff209d81d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json b/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json new file mode 100644 index 000000000000..8f5b046b974f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "548414f8148740c991c345e5fd46ea738d209eb07e7a6bcbdb33e25b3347a08c" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json b/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json new file mode 100644 index 000000000000..208b23d939f8 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "63cf7038e6c48af8ed9afc7d6ea07edd87cb16a79c13e7d4291d99736e51d3b9" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json b/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json new file mode 100644 
index 000000000000..14463ecbe426 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "75c1affbca0901edd5d0e2f12ef4d935674a5aff2f34421d753b4d1a9dea5b12" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json b/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json new file mode 100644 index 000000000000..3d60050c92ed --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "UPDATE prover_jobs_fri \n SET status = $1, attempts = $2\n WHERE l1_batch_number = $3\n AND sequence_number =$4\n AND aggregation_round = $5\n AND circuit_id = $6", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "aabcfa9005b8e1d84cfa083a47a700302981be0afef31a8864613484f8521f9e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json b/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json new file mode 100644 index 000000000000..3c4c8d7a29f3 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (status = 'in_progress' OR status = 'failed')\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "c19fc4c8e4b3a3ef4f9c0f4c22ed68c598eada8e60938a8e4b5cd32b53f5a574" +} diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index 069fa9c6a41c..19fe0e4f57b0 100644 --- 
a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -20,14 +20,18 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( - "UPDATE prover_jobs_fri SET status = '{}' - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + sqlx::query!( + "UPDATE prover_jobs_fri SET status = $1 + WHERE l1_batch_number = $2 + AND sequence_number = $3 + AND aggregation_round = $4 + AND circuit_id = $5", + status.to_string(), + batch_number.0 as i64, + sequence_number as i64, + aggregation_round as i16, + circuit_id as i64, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -39,7 +43,7 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( + sqlx::query!( " INSERT INTO leaf_aggregation_witness_jobs_fri ( @@ -51,13 +55,15 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, {}, 'waiting_for_proofs', 2, NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE - SET status = '{}' + SET status = $3 ", - batch_number.0, circuit_id, status - )) + batch_number.0 as i64, + circuit_id as i16, + status.to_string() + ) .execute(self.storage.conn()) .await .unwrap(); @@ -69,7 +75,7 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( + sqlx::query!( " INSERT INTO node_aggregation_witness_jobs_fri ( @@ -80,20 +86,22 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, {}, 'waiting_for_proofs', NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE - SET status = '{}' + SET status = $3 ", - batch_number.0, circuit_id, status, - )) + batch_number.0 as i64, + circuit_id as i16, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); } pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { - sqlx::query(&format!( + sqlx::query!( " INSERT INTO recursion_tip_witness_jobs_fri ( @@ -104,13 +112,14 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, 'waiting_for_proofs',1, NOW(), NOW()) + ($1, 'waiting_for_proofs',1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' + SET status = $2 ", - batch_number.0, status, - )) + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -121,7 +130,7 @@ impl CliTestDal<'_, '_> { status: WitnessJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( + sqlx::query!( " INSERT INTO scheduler_witness_jobs_fri ( @@ -132,13 +141,14 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, '', 'waiting_for_proofs', NOW(), NOW()) + ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' + SET status = $2 ", - batch_number.0, status, - )) + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -149,7 +159,7 @@ impl CliTestDal<'_, '_> { status: ProofCompressionJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( + sqlx::query!( " INSERT INTO proof_compression_jobs_fri ( @@ -159,13 +169,14 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, '{}', NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' + SET status = $2 ", - batch_number.0, status, 
status, - )) + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -180,15 +191,20 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( + sqlx::query!( "UPDATE prover_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, attempts, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + SET status = $1, attempts = $2 + WHERE l1_batch_number = $3 + AND sequence_number =$4 + AND aggregation_round = $5 + AND circuit_id = $6", + status.to_string(), + attempts as i64, + batch_number.0 as i64, + sequence_number as i64, + aggregation_round as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -201,13 +217,16 @@ impl CliTestDal<'_, '_> { circuit_id: u8, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( + sqlx::query!( "UPDATE leaf_aggregation_witness_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND circuit_id = {}", - status, attempts, batch_number.0, circuit_id, - )) + SET status = $1, attempts = $2 + WHERE l1_batch_number = $3 + AND circuit_id = $4", + status.to_string(), + attempts as i64, + batch_number.0 as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 791038be0bb8..66e34f7f8e75 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -1709,7 +1709,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec { - let query = format!( + sqlx::query!( r#" UPDATE witness_inputs_fri SET @@ -1717,8 +1717,8 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} + l1_batch_number = $1 + AND attempts >= $2 AND (status = 'in_progress' OR status = 'failed') RETURNING l1_batch_number, @@ -1728,22 +1728,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } pub async fn requeue_stuck_leaf_aggregation_jobs_for_batch( @@ -1777,7 +1776,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec { - let query = format!( + sqlx::query!( r#" UPDATE recursion_tip_witness_jobs_fri SET @@ -1785,8 +1784,8 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} + l1_batch_number = $1 + AND attempts >= $2 AND (status = 'in_progress' OR status = 'failed') RETURNING l1_batch_number, @@ -1796,22 +1795,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, 
            i64::from(block_number.0),
-            max_attempts
-        );
-        sqlx::query(&query)
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|row| StuckJobs {
-                id: row.get::("l1_batch_number") as u64,
-                status: row.get("status"),
-                attempts: row.get::("attempts") as u64,
-                circuit_id: None,
-                error: row.get("error"),
-                picked_by: row.get("picked_by"),
-            })
-            .collect()
+            max_attempts as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| StuckJobs {
+            id: row.l1_batch_number as u64,
+            status: row.status,
+            attempts: row.attempts as u64,
+            circuit_id: None,
+            error: row.error,
+            picked_by: row.picked_by,
+        })
+        .collect()
     }

     pub async fn requeue_stuck_scheduler_jobs_for_batch(
@@ -1819,7 +1817,7 @@ impl FriWitnessGeneratorDal<'_, '_> {
         block_number: L1BatchNumber,
         max_attempts: u32,
     ) -> Vec<StuckJobs> {
-        let query = format!(
+        sqlx::query!(
            r#"
            UPDATE scheduler_witness_jobs_fri
            SET
@@ -1827,8 +1825,8 @@ impl FriWitnessGeneratorDal<'_, '_> {
                updated_at = NOW(),
                processing_started_at = NOW()
            WHERE
-                l1_batch_number = {}
-                AND attempts >= {}
+                l1_batch_number = $1
+                AND attempts >= $2
                AND (status = 'in_progress' OR status = 'failed')
            RETURNING
                l1_batch_number,
                status,
                attempts,
                error,
                picked_by
            "#,
            i64::from(block_number.0),
-            max_attempts
-        );
-        sqlx::query(&query)
-            .fetch_all(self.storage.conn())
-            .await
-            .unwrap()
-            .into_iter()
-            .map(|row| StuckJobs {
-                id: row.get::("l1_batch_number") as u64,
-                status: row.get("status"),
-                attempts: row.get::("attempts") as u64,
-                circuit_id: None,
-                error: row.get("error"),
-                picked_by: row.get("picked_by"),
-            })
-            .collect()
+            max_attempts as i64
+        )
+        .fetch_all(self.storage.conn())
+        .await
+        .unwrap()
+        .into_iter()
+        .map(|row| StuckJobs {
+            id: row.l1_batch_number as u64,
+            status: row.status,
+            attempts: row.attempts as u64,
+            circuit_id: None,
+            error: row.error,
+            picked_by: row.picked_by,
+        })
+        .collect()
     }

     async fn requeue_stuck_jobs_for_batch_in_aggregation_round(

From 48317e640a00b016bf7bf782cc94fccaf077ed6d Mon Sep 17 00:00:00 2001
From: EmilLuta
Date: Fri, 20 Sep 2024 13:36:36 +0200
Subject: [PATCH 115/116] feat(circuit_prover): Add circuit prover (#2908)

### Summary

This PR introduces a new component, `circuit_prover`, which runs multiple WVGs and a GPU prover together, without prover groups. The changes are non-destructive; the old setup and the new setup must work together in tandem.

### What?

Circuit prover is a component that runs X WVGs alongside a GPU prover. By making full use of the CPUs on the GPU machine, WVGs as a standalone component can be removed altogether. Prover groups are no longer needed. Based on empirical testing, we can (almost -- there will be follow-up fixes to make it fully efficient) run everything on a single machine.

The current implementation can sunset the old setup. Current metrics show that the circuit prover is more than 60% more efficient than the old one (but quirks are needed for node proofs to unlock this -- they will be treated as a follow-up). The purpose is to have `circuit_prover` deprecate the old `prover_fri` & `witness_vector_generator`.

### Why?

The changes will allow us to reduce our infrastructure footprint by ~2x and fix plenty of issues we have had in the past. Namely:

- fully decoupled from GCP
- better resource utilization & reduced costs
- reduced overall infrastructure needs (which solves the GPU unavailability we've been facing)
- reduced complexity & other inefficiencies (no more prover groups!)
- and more

### Ask

We want to unblock folks running on AWS.
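As an illustration, the new binary would be launched roughly like this (a hypothetical invocation; the flag names follow the `Cli` struct introduced in `main.rs`, and the paths are placeholders):

```
zksync_circuit_prover \
    --config-path /path/to/general.yaml \
    --secrets-path /path/to/secrets.yaml \
    --witness-vector-generator-count 4 \
    --max-allocation 17179869184
```

`--max-allocation` caps how much VRAM the prover may claim; omitting it allocates all available VRAM.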
This PR is shipped as is to speed up the release process on the DevOps side, as it is the longest pole.

NOTE: This is the first PR out of a longer set of PRs. Comments are more than welcome, but the following concerns will be addressed in follow-up PRs and are out of scope for this PR:

- the tracing implementation is subpar; in fact, I'm confident that most metrics could be done via traces
- there's a lot of code duplication (both between the old and new prover, and in the runner interface between the new WVG & CP)
- tests
- separation of concerns between job scheduling & job execution
- job priority based on resource consumption
- other nits (such as the missing README, constants being hard-coded instead of configurable, etc.)

### Reviewer suggestion

This is basically a merge of `prover_fri`, `witness_vector_generation` and `JobProcessor`. Reading the code side by side should give you a better view of what's going on. Sorry for making this hard. :/
---
 .github/workflows/ci-common-reusable.yml      |   1 +
 .github/workflows/ci-core-lint-reusable.yml   |   1 +
 .github/workflows/ci-prover-reusable.yml      |   1 +
 core/lib/basic_types/src/prover_dal.rs        |   2 +-
 docker/witness-generator/Dockerfile           |   2 +-
 docker/witness-vector-generator/Dockerfile    |   2 +
 docs/guides/setup-dev.md                      |  26 ++
 prover/Cargo.lock                             |  29 ++
 prover/Cargo.toml                             |   1 +
 prover/crates/bin/circuit_prover/Cargo.toml   |  38 ++
 .../crates/bin/circuit_prover/src/backoff.rs  |  39 ++
 .../bin/circuit_prover/src/circuit_prover.rs  | 397 ++++++++++++++++++
 prover/crates/bin/circuit_prover/src/lib.rs   |  13 +
 prover/crates/bin/circuit_prover/src/main.rs  | 201 +++++++++
 .../crates/bin/circuit_prover/src/metrics.rs  |  80 ++++
 prover/crates/bin/circuit_prover/src/types.rs |  31 ++
 .../src/witness_vector_generator.rs           | 345 +++++++++++++++
 .../prover_fri/src/prover_job_processor.rs    |   6 +-
 .../witness_vector_generator/src/generator.rs |   2 +-
 prover/crates/lib/keystore/Cargo.toml         |   2 +
 prover/crates/lib/keystore/src/keystore.rs    |  60 ++-
 .../lib/keystore/src/setup_data_generator.rs  |   8 +-
 ...fcddbb47c5e9965443f118f8edd7d562734a2.json |  60 +++
 .../lib/prover_dal/src/fri_prover_dal.rs      |  72 ++++
 .../crates/lib/prover_fri_types/src/keys.rs   |  16 +-
 prover/crates/lib/prover_fri_types/src/lib.rs |  69 ++-
 26 files changed, 1484 insertions(+), 20 deletions(-)
 create mode 100644 prover/crates/bin/circuit_prover/Cargo.toml
 create mode 100644 prover/crates/bin/circuit_prover/src/backoff.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/circuit_prover.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/lib.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/main.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/metrics.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/types.rs
 create mode 100644 prover/crates/bin/circuit_prover/src/witness_vector_generator.rs
 create mode 100644 prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json

diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml
index 2e5d36feebff..3d28df592e98 100644
--- a/.github/workflows/ci-common-reusable.yml
+++ b/.github/workflows/ci-common-reusable.yml
@@ -22,6 +22,7 @@ jobs:
           echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
           echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
           echo "RUSTC_WRAPPER=sccache" >> .env
+          echo "RUSTFLAGS=--cfg=no_cuda" >> .env

       - name: Start services
         run: |
diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml
index
404f0966b405..85e4be3ff5e3 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -19,6 +19,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "RUSTFLAGS=--cfg=no_cuda" >> .env echo "prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 367a86c5f40f..6fa987b1cecf 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -57,6 +57,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "RUSTFLAGS=--cfg=no_cuda" >> .env - name: Start services run: | diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 36f6c89135a0..bec5a55ced1f 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -9,7 +9,7 @@ use crate::{ basic_fri_types::AggregationRound, protocol_version::ProtocolVersionId, L1BatchNumber, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct FriProverJobMetadata { pub id: u32, pub block_number: L1BatchNumber, diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 9211c3e23e53..06d836c9fa58 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,7 +1,7 @@ FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive -ARG RUST_FLAGS="" +ARG RUST_FLAGS="--cfg=no_cuda" ENV RUSTFLAGS=${RUST_FLAGS} # set of args for use of sccache diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile index 93d8dd308a58..eb46b459c695 100644 --- a/docker/witness-vector-generator/Dockerfile +++ b/docker/witness-vector-generator/Dockerfile @@ -1,6 +1,8 @@ FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +ARG RUST_FLAGS="--cfg=no_cuda" +ENV RUSTFLAGS=${RUST_FLAGS} # set of args for use of sccache ARG SCCACHE_GCS_BUCKET="" diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 10eb329628c1..7781e65e5bfb 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -48,6 +48,10 @@ cargo install sqlx-cli --version 0.8.1 # Foundry curl -L https://foundry.paradigm.xyz | bash foundryup --branch master + +# Non GPU setup, can be skipped if the machine has a GPU configured for provers +echo "export RUSTFLAGS='--cfg=no_cuda'" >> ~/.bashrc + # You will need to reload your `*rc` file here # Clone the repo to the desired location @@ -237,6 +241,28 @@ Go to the zksync folder and run `nix develop`. After it finishes, you are in a s [Foundry](https://book.getfoundry.sh/getting-started/installation) can be utilized for deploying smart contracts. For commands related to deployment, you can pass flags for Foundry integration. +## Non-GPU setup + +Circuit Prover requires a GPU (& CUDA bindings) to run. If you still want to be able to build everything locally on +non-GPU setup, you'll need to change your rustflags. 
+ +For a single run, it's enough to export it on the shell: + +``` +export RUSTFLAGS='--cfg=no_cuda' +``` + +For persistent runs, you can either echo it in your ~/.rc file (discouraged), or configure it for your taste in +`config.toml`. + +For project level configuration, edit `/path/to/zksync/.cargo/config.toml`. For global cargo setup, +`~/.cargo/config.toml`. Add the following: + +```toml +[build] +rustflags = ["--cfg=no_cuda"] +``` + ## Environment Edit the lines below and add them to your shell profile file (e.g. `~/.bash_profile`, `~/.zshrc`): diff --git a/prover/Cargo.lock b/prover/Cargo.lock index 38c2ca162c43..88c0d1114fc4 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -7362,6 +7362,33 @@ dependencies = [ "zksync_pairing", ] +[[package]] +name = "zksync_circuit_prover" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "clap 4.5.4", + "shivini", + "tokio", + "tokio-util", + "tracing", + "vise", + "zkevm_test_harness", + "zksync_config", + "zksync_core_leftovers", + "zksync_env_config", + "zksync_object_store", + "zksync_prover_dal", + "zksync_prover_fri_types", + "zksync_prover_fri_utils", + "zksync_prover_keystore", + "zksync_queued_job_processor", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_concurrency" version = "0.1.1" @@ -7969,6 +7996,7 @@ dependencies = [ "anyhow", "bincode", "circuit_definitions", + "futures 0.3.30", "hex", "md5", "once_cell", @@ -7976,6 +8004,7 @@ dependencies = [ "serde_json", "sha3 0.10.8", "shivini", + "tokio", "tracing", "zkevm_test_harness", "zksync_basic_types", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index fd171b254d5a..b21ad800afac 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -51,6 +51,7 @@ structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" tokio = "1" +tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" diff --git a/prover/crates/bin/circuit_prover/Cargo.toml b/prover/crates/bin/circuit_prover/Cargo.toml new file mode 100644 index 000000000000..a5751a4cd9a6 --- /dev/null +++ b/prover/crates/bin/circuit_prover/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "zksync_circuit_prover" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +tokio = { workspace = true, features = ["macros", "time"] } +tokio-util.workspace = true +anyhow.workspace = true +async-trait.workspace = true +tracing.workspace = true +bincode.workspace = true +clap = { workspace = true, features = ["derive"] } + +zksync_config.workspace = true +zksync_object_store.workspace = true +zksync_prover_dal.workspace = true +zksync_prover_fri_types.workspace = true +zksync_prover_fri_utils.workspace = true +zksync_queued_job_processor.workspace = true +zksync_types.workspace = true +zksync_prover_keystore = { workspace = true, features = ["gpu"] } +zksync_env_config.workspace = true +zksync_core_leftovers.workspace = true +zksync_utils.workspace = true + +vise.workspace = true +shivini = { workspace = true, features = [ + "circuit_definitions", + "zksync", +] } +zkevm_test_harness.workspace = true diff --git a/prover/crates/bin/circuit_prover/src/backoff.rs b/prover/crates/bin/circuit_prover/src/backoff.rs new file mode 100644 index 000000000000..6ddb3d94be35 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/backoff.rs @@ -0,0 +1,39 @@ +use std::{ops::Mul, 
time::Duration};
+
+/// Backoff - convenience structure that takes care of backoff timings.
+#[derive(Debug, Clone)]
+pub struct Backoff {
+    base_delay: Duration,
+    current_delay: Duration,
+    max_delay: Duration,
+}
+
+impl Backoff {
+    /// The delay multiplication coefficient.
+    // Currently it's hardcoded, but could be provided in the constructor.
+    const DELAY_MULTIPLIER: u32 = 2;
+
+    /// Create a backoff with base_delay (first delay) and max_delay (maximum delay possible).
+    pub fn new(base_delay: Duration, max_delay: Duration) -> Self {
+        Backoff {
+            base_delay,
+            current_delay: base_delay,
+            max_delay,
+        }
+    }
+
+    /// Get the current delay and advance to the next one (multiplied by `DELAY_MULTIPLIER`, capped at `max_delay`).
+    pub fn delay(&mut self) -> Duration {
+        let delay = self.current_delay;
+        self.current_delay = self
+            .current_delay
+            .mul(Self::DELAY_MULTIPLIER)
+            .min(self.max_delay);
+        delay
+    }
+
+    /// Reset the backoff time to the base delay.
+    pub fn reset(&mut self) {
+        self.current_delay = self.base_delay;
+    }
+}
diff --git a/prover/crates/bin/circuit_prover/src/circuit_prover.rs b/prover/crates/bin/circuit_prover/src/circuit_prover.rs
new file mode 100644
index 000000000000..1a5f8aa0d974
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/circuit_prover.rs
@@ -0,0 +1,397 @@
+use std::{sync::Arc, time::Instant};
+
+use anyhow::Context;
+use shivini::{
+    gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext,
+    ProverContextConfig,
+};
+use tokio::{sync::mpsc::Receiver, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof};
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_prover_fri_types::{
+    circuit_definitions::{
+        base_layer_proof_config,
+        boojum::{
+            cs::implementations::{pow::NoPow, witness::WitnessVec},
+            field::goldilocks::GoldilocksField,
+            worker::Worker,
+        },
+        circuit_definitions::{
+            base_layer::ZkSyncBaseLayerProof, recursion_layer::ZkSyncRecursionLayerProof,
+        },
+        recursion_layer_proof_config,
+    },
+    CircuitWrapper, FriProofWrapper, ProverArtifacts, WitnessVectorArtifactsTemp,
+};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+use zksync_types::protocol_version::ProtocolSemanticVersion;
+use zksync_utils::panic_extractor::try_extract_panic_message;
+
+use crate::{
+    metrics::CIRCUIT_PROVER_METRICS,
+    types::{DefaultTranscript, DefaultTreeHasher, Proof, VerificationKey},
+    SetupDataCache,
+};
+
+/// In charge of proving circuits, given a Witness Vector source.
+/// Both job runner & job executor.
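+/// Witness vectors arrive over a bounded mpsc channel (wired up in `main.rs`); the bound lets
+/// the channel apply back-pressure to the witness vector generators when the prover falls behind.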
+#[derive(Debug)] +pub struct CircuitProver { + connection_pool: ConnectionPool, + object_store: Arc, + protocol_version: ProtocolSemanticVersion, + /// Witness Vector source receiver + receiver: Receiver, + /// Setup Data used for proving & proof verification + setup_data_cache: SetupDataCache, +} + +impl CircuitProver { + pub fn new( + connection_pool: ConnectionPool, + object_store: Arc, + protocol_version: ProtocolSemanticVersion, + receiver: Receiver, + max_allocation: Option, + setup_data_cache: SetupDataCache, + ) -> anyhow::Result<(Self, ProverContext)> { + // VRAM allocation + let prover_context = match max_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .context("failed initializing fixed gpu prover context")?, + None => ProverContext::create().context("failed initializing gpu prover context")?, + }; + Ok(( + Self { + connection_pool, + object_store, + protocol_version, + receiver, + setup_data_cache, + }, + prover_context, + )) + } + + /// Continuously polls `receiver` for Witness Vectors and proves them. + /// All job executions are persisted. + pub async fn run(mut self, cancellation_token: CancellationToken) -> anyhow::Result<()> { + while !cancellation_token.is_cancelled() { + let time = Instant::now(); + + let artifact = self + .receiver + .recv() + .await + .context("no Witness Vector Generators are available")?; + tracing::info!( + "Circuit Prover received job {:?} after: {:?}", + artifact.prover_job.job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.job_wait_time.observe(time.elapsed()); + + self.prove(artifact, cancellation_token.clone()) + .await + .context("failed to prove circuit proof")?; + } + tracing::info!("Circuit Prover shut down."); + Ok(()) + } + + /// Proves a job, with persistence of execution. + async fn prove( + &self, + artifact: WitnessVectorArtifactsTemp, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let block_number = artifact.prover_job.block_number; + let job_id = artifact.prover_job.job_id; + let job_start_time = artifact.time; + let setup_data_key = artifact.prover_job.setup_data_key.crypto_setup_key(); + let setup_data = self + .setup_data_cache + .get(&setup_data_key) + .context(format!( + "failed to get setup data for key {setup_data_key:?}" + ))? + .clone(); + let task = tokio::task::spawn_blocking(move || { + let _span = tracing::info_span!("prove_circuit_proof", %block_number).entered(); + Self::prove_circuit_proof(artifact, setup_data).context("failed to prove circuit") + }); + + self.finish_task( + job_id, + time, + job_start_time, + task, + cancellation_token.clone(), + ) + .await?; + tracing::info!( + "Circuit Prover finished job {:?} in: {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS + .job_finished_time + .observe(time.elapsed()); + CIRCUIT_PROVER_METRICS + .full_proving_time + .observe(job_start_time.elapsed()); + Ok(()) + } + + /// Proves a job using crypto primitives (proof generation & proof verification). + #[tracing::instrument( + name = "Prover::prove_circuit_proof", + skip_all, + fields(l1_batch = % witness_vector_artifacts.prover_job.block_number) + )] + pub fn prove_circuit_proof( + witness_vector_artifacts: WitnessVectorArtifactsTemp, + setup_data: Arc, + ) -> anyhow::Result { + let time = Instant::now(); + let WitnessVectorArtifactsTemp { + witness_vector, + prover_job, + .. 
+ } = witness_vector_artifacts; + + let job_id = prover_job.job_id; + let circuit_wrapper = prover_job.circuit_wrapper; + let block_number = prover_job.block_number; + + let (proof, circuit_id) = + Self::generate_proof(&circuit_wrapper, witness_vector, &setup_data) + .context(format!("failed to generate proof for job id {job_id}"))?; + + Self::verify_proof(&circuit_wrapper, &proof, &setup_data.vk).context(format!( + "failed to verify proof with job_id {job_id}, circuit_id: {circuit_id}" + ))?; + + let proof_wrapper = match &circuit_wrapper { + CircuitWrapper::Base(_) => { + FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::Recursive(_) => { + FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + CIRCUIT_PROVER_METRICS + .crypto_primitives_time + .observe(time.elapsed()); + Ok(ProverArtifacts::new(block_number, proof_wrapper)) + } + + /// Generates a proof from crypto primitives. + fn generate_proof( + circuit_wrapper: &CircuitWrapper, + witness_vector: WitnessVec, + setup_data: &Arc, + ) -> anyhow::Result<(Proof, u8)> { + let time = Instant::now(); + + let worker = Worker::new(); + + let (gpu_proof_config, proof_config, circuit_id) = match circuit_wrapper { + CircuitWrapper::Base(circuit) => ( + GpuProofConfig::from_base_layer_circuit(circuit), + base_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::Recursive(circuit) => ( + GpuProofConfig::from_recursive_layer_circuit(circuit), + recursion_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + let proof = + gpu_prove_from_external_witness_data::( + &gpu_proof_config, + &witness_vector, + proof_config, + &setup_data.setup, + &setup_data.vk, + (), + &worker, + ) + .context("crypto primitive: failed to generate proof")?; + CIRCUIT_PROVER_METRICS + .generate_proof_time + .observe(time.elapsed()); + Ok((proof.into(), circuit_id)) + } + + /// Verifies a proof from crypto primitives + fn verify_proof( + circuit_wrapper: &CircuitWrapper, + proof: &Proof, + verification_key: &VerificationKey, + ) -> anyhow::Result<()> { + let time = Instant::now(); + + let is_valid = match circuit_wrapper { + CircuitWrapper::Base(base_circuit) => { + verify_base_layer_proof::(base_circuit, proof, verification_key) + } + CircuitWrapper::Recursive(recursive_circuit) => { + verify_recursion_layer_proof::(recursive_circuit, proof, verification_key) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + CIRCUIT_PROVER_METRICS + .verify_proof_time + .observe(time.elapsed()); + + if !is_valid { + return Err(anyhow::anyhow!("crypto primitive: failed to verify proof")); + } + Ok(()) + } + + /// This code path should never trigger. All proofs are hydrated during Witness Vector Generator. + /// If this triggers, it means that proof hydration in Witness Vector Generator was not done -- logic bug. + fn partial_proof_error() -> anyhow::Result { + Err(anyhow::anyhow!("received unexpected dehydrated proof")) + } + + /// Runs task to completion and persists result. + /// NOTE: Task may be cancelled mid-flight. + async fn finish_task( + &self, + job_id: u32, + time: Instant, + job_start_time: Instant, + task: JoinHandle>, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + tokio::select! 
{ + _ = cancellation_token.cancelled() => { + tracing::info!("Stop signal received, shutting down Circuit Prover..."); + return Ok(()) + } + result = task => { + let error_message = match result { + Ok(Ok(prover_artifact)) => { + tracing::info!("Circuit Prover executed job {:?} in: {:?}", job_id, time.elapsed()); + CIRCUIT_PROVER_METRICS.execution_time.observe(time.elapsed()); + self + .save_result(job_id, job_start_time, prover_artifact) + .await.context("failed to save result")?; + return Ok(()) + } + Ok(Err(error)) => error.to_string(), + Err(error) => try_extract_panic_message(error), + }; + tracing::error!( + "Circuit Prover failed on job {:?} with error {:?}", + job_id, + error_message + ); + + self.save_failure(job_id, error_message).await.context("failed to save failure")?; + } + } + + Ok(()) + } + + /// Persists proof generated. + /// Job metadata is saved to database, whilst artifacts go to object store. + async fn save_result( + &self, + job_id: u32, + job_start_time: Instant, + artifacts: ProverArtifacts, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let mut connection = self + .connection_pool + .connection() + .await + .context("failed to get db connection")?; + let proof = artifacts.proof_wrapper; + + let (_circuit_type, is_scheduler_proof) = match &proof { + FriProofWrapper::Base(base) => (base.numeric_circuit_type(), false), + FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { + ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { + (recursive_circuit.numeric_circuit_type(), true) + } + _ => (recursive_circuit.numeric_circuit_type(), false), + }, + }; + + let upload_time = Instant::now(); + let blob_url = self + .object_store + .put(job_id, &proof) + .await + .context("failed to upload to object store")?; + CIRCUIT_PROVER_METRICS + .artifact_upload_time + .observe(upload_time.elapsed()); + + let mut transaction = connection + .start_transaction() + .await + .context("failed to start db transaction")?; + transaction + .fri_prover_jobs_dal() + .save_proof(job_id, job_start_time.elapsed(), &blob_url) + .await; + if is_scheduler_proof { + transaction + .fri_proof_compressor_dal() + .insert_proof_compression_job( + artifacts.block_number, + &blob_url, + self.protocol_version, + ) + .await; + } + transaction + .commit() + .await + .context("failed to commit db transaction")?; + + tracing::info!( + "Circuit Prover saved job {:?} after {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.save_time.observe(time.elapsed()); + + Ok(()) + } + + /// Persists job execution error to database. + async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { + self.connection_pool + .connection() + .await + .context("failed to get db connection")? + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + Ok(()) + } +} diff --git a/prover/crates/bin/circuit_prover/src/lib.rs b/prover/crates/bin/circuit_prover/src/lib.rs new file mode 100644 index 000000000000..7d7ce1d96686 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/lib.rs @@ -0,0 +1,13 @@ +#![allow(incomplete_features)] // We have to use generic const exprs. 
+#![feature(generic_const_exprs)] +pub use backoff::Backoff; +pub use circuit_prover::CircuitProver; +pub use metrics::PROVER_BINARY_METRICS; +pub use types::{FinalizationHintsCache, SetupDataCache}; +pub use witness_vector_generator::WitnessVectorGenerator; + +mod backoff; +mod circuit_prover; +mod metrics; +mod types; +mod witness_vector_generator; diff --git a/prover/crates/bin/circuit_prover/src/main.rs b/prover/crates/bin/circuit_prover/src/main.rs new file mode 100644 index 000000000000..e26f29ca995d --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/main.rs @@ -0,0 +1,201 @@ +use std::{ + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::Context as _; +use clap::Parser; +use tokio_util::sync::CancellationToken; +use zksync_circuit_prover::{ + Backoff, CircuitProver, FinalizationHintsCache, SetupDataCache, WitnessVectorGenerator, + PROVER_BINARY_METRICS, +}; +use zksync_config::{ + configs::{FriProverConfig, ObservabilityConfig}, + ObjectStoreConfig, +}; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; +use zksync_utils::wait_for_tasks::ManagedTasks; + +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version)] +struct Cli { + #[arg(long)] + pub(crate) config_path: Option, + #[arg(long)] + pub(crate) secrets_path: Option, + /// Number of WVG jobs to run in parallel. + /// Default value is 1. + #[arg(long, default_value_t = 1)] + pub(crate) witness_vector_generator_count: usize, + /// Max VRAM to allocate. Useful if you want to limit the size of VRAM used. + /// None corresponds to allocating all available VRAM. + #[arg(long)] + pub(crate) max_allocation: Option, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let time = Instant::now(); + let opt = Cli::parse(); + + let (observability_config, prover_config, object_store_config) = load_configs(opt.config_path)?; + + let _observability_guard = observability_config + .install() + .context("failed to install observability")?; + + let wvg_count = opt.witness_vector_generator_count as u32; + + let (connection_pool, object_store, setup_data_cache, hints) = load_resources( + opt.secrets_path, + object_store_config, + prover_config.setup_data_path.into(), + wvg_count, + ) + .await + .context("failed to load configs")?; + + PROVER_BINARY_METRICS.start_up.observe(time.elapsed()); + + let cancellation_token = CancellationToken::new(); + let backoff = Backoff::new(Duration::from_secs(5), Duration::from_secs(30)); + + let mut tasks = vec![]; + + let (sender, receiver) = tokio::sync::mpsc::channel(5); + + tracing::info!("Starting {wvg_count} Witness Vector Generators."); + + for _ in 0..wvg_count { + let wvg = WitnessVectorGenerator::new( + object_store.clone(), + connection_pool.clone(), + PROVER_PROTOCOL_SEMANTIC_VERSION, + sender.clone(), + hints.clone(), + ); + tasks.push(tokio::spawn( + wvg.run(cancellation_token.clone(), backoff.clone()), + )); + } + + // NOTE: Prover Context is the way VRAM is allocated. If it is dropped, the claim on VRAM allocation is dropped as well. + // It has to be kept until prover dies. Whilst it may be kept in prover struct, during cancellation, prover can `drop`, but the thread doing the processing can still be alive. 
+ // This setup prevents segmentation faults and other nasty behavior during shutdown. + let (prover, _prover_context) = CircuitProver::new( + connection_pool, + object_store, + PROVER_PROTOCOL_SEMANTIC_VERSION, + receiver, + opt.max_allocation, + setup_data_cache, + ) + .context("failed to create circuit prover")?; + tasks.push(tokio::spawn(prover.run(cancellation_token.clone()))); + + let mut tasks = ManagedTasks::new(tasks); + tokio::select! { + _ = tasks.wait_single() => {}, + result = tokio::signal::ctrl_c() => { + match result { + Ok(_) => { + tracing::info!("Stop signal received, shutting down..."); + cancellation_token.cancel(); + }, + Err(_err) => { + tracing::error!("failed to set up ctrl c listener"); + } + } + } + } + PROVER_BINARY_METRICS.run_time.observe(time.elapsed()); + tasks.complete(Duration::from_secs(5)).await; + + Ok(()) +} + +/// Loads configs necessary for proving. +/// - observability config - for observability setup +/// - prover config - necessary for setup data +/// - object store config - for retrieving artifacts for WVG & CP +fn load_configs( + config_path: Option, +) -> anyhow::Result<(ObservabilityConfig, FriProverConfig, ObjectStoreConfig)> { + tracing::info!("loading configs..."); + let general_config = + load_general_config(config_path).context("failed loading general config")?; + let observability_config = general_config + .observability + .context("failed loading observability config")?; + let prover_config = general_config + .prover_config + .context("failed loading prover config")?; + let object_store_config = prover_config + .prover_object_store + .clone() + .context("failed loading prover object store config")?; + tracing::info!("Loaded configs."); + Ok((observability_config, prover_config, object_store_config)) +} + +/// Loads resources necessary for proving. 
+/// - connection pool - necessary to pick & store jobs from database
+/// - object store - necessary for loading and storing artifacts to object store
+/// - setup data - necessary for circuit proving
+/// - finalization hints - necessary for generating witness vectors
+async fn load_resources(
+    secrets_path: Option<PathBuf>,
+    object_store_config: ObjectStoreConfig,
+    setup_data_path: PathBuf,
+    wvg_count: u32,
+) -> anyhow::Result<(
+    ConnectionPool<Prover>,
+    Arc<dyn ObjectStore>,
+    SetupDataCache,
+    FinalizationHintsCache,
+)> {
+    let database_secrets =
+        load_database_secrets(secrets_path).context("failed to load database secrets")?;
+    let database_url = database_secrets
+        .prover_url
+        .context("no prover DB URL present")?;
+
+    // 1 connection for the prover and one for each vector generator
+    let max_connections = 1 + wvg_count;
+    let connection_pool = ConnectionPool::<Prover>::builder(database_url, max_connections)
+        .build()
+        .await
+        .context("failed to build connection pool")?;
+
+    let object_store = ObjectStoreFactory::new(object_store_config)
+        .create_store()
+        .await
+        .context("failed to create object store")?;
+
+    tracing::info!("Loading mappings from disk...");
+
+    let keystore = Keystore::locate().with_setup_path(Some(setup_data_path));
+    let setup_data_cache = keystore
+        .load_all_setup_key_mapping()
+        .await
+        .context("failed to load setup key mapping")?;
+    let finalization_hints = keystore
+        .load_all_finalization_hints_mapping()
+        .await
+        .context("failed to load finalization hints mapping")?;
+
+    tracing::info!("Loaded mappings from disk.");
+
+    Ok((
+        connection_pool,
+        object_store,
+        setup_data_cache,
+        finalization_hints,
+    ))
+}
diff --git a/prover/crates/bin/circuit_prover/src/metrics.rs b/prover/crates/bin/circuit_prover/src/metrics.rs
new file mode 100644
index 000000000000..e9f445914795
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/metrics.rs
@@ -0,0 +1,80 @@
+use std::time::Duration;
+
+use vise::{Buckets, Histogram, Metrics};
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "prover_binary")]
+pub struct ProverBinaryMetrics {
+    /// How long does it take for the prover to load data before it can produce proofs?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub start_up: Histogram<Duration>,
+    /// How long has the prover been running?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub run_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static PROVER_BINARY_METRICS: vise::Global<ProverBinaryMetrics> = vise::Global::new();
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "witness_vector_generator")]
+pub struct WitnessVectorGeneratorMetrics {
+    /// How long does the witness vector generator wait before a job is available?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_wait_time: Histogram<Duration>,
+    /// How long does it take to load object store artifacts for a witness vector job?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub artifact_download_time: Histogram<Duration>,
+    /// How long does the crypto witness generation primitive take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub crypto_primitive_time: Histogram<Duration>,
+    /// How long does it take for a job to be executed, from the moment it's loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub execution_time: Histogram<Duration>,
+    /// How long does it take to send a job to prover?
+    /// This is relevant because prover queue can apply back-pressure.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub send_time: Histogram<Duration>,
+    /// How long does it take for a job to be considered finished, from the moment it's been loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_finished_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static WITNESS_VECTOR_GENERATOR_METRICS: vise::Global<WitnessVectorGeneratorMetrics> =
+    vise::Global::new();
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "circuit_prover")]
+pub struct CircuitProverMetrics {
+    /// How long does the circuit prover wait before a job is available?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_wait_time: Histogram<Duration>,
+    /// How long do the crypto primitives (proof generation & verification) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub crypto_primitives_time: Histogram<Duration>,
+    /// How long does proof generation (crypto primitive) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub generate_proof_time: Histogram<Duration>,
+    /// How long does proof verification (crypto primitive) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub verify_proof_time: Histogram<Duration>,
+    /// How long does it take for a job to be executed, from the moment it's loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub execution_time: Histogram<Duration>,
+    /// How long does it take to upload proof to object store?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub artifact_upload_time: Histogram<Duration>,
+    /// How long does it take to save a job?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub save_time: Histogram<Duration>,
+    /// How long does it take for a job to be considered finished, from the moment it's been loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_finished_time: Histogram<Duration>,
+    /// How long does it take a job to go from witness generation to having the proof saved?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub full_proving_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static CIRCUIT_PROVER_METRICS: vise::Global<CircuitProverMetrics> = vise::Global::new();
diff --git a/prover/crates/bin/circuit_prover/src/types.rs b/prover/crates/bin/circuit_prover/src/types.rs
new file mode 100644
index 000000000000..52cdd48b6b50
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/types.rs
@@ -0,0 +1,31 @@
+use std::{collections::HashMap, sync::Arc};
+
+use zksync_prover_fri_types::{
+    circuit_definitions::boojum::{
+        algebraic_props::{
+            round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge,
+        },
+        cs::implementations::{
+            proof::Proof as CryptoProof, setup::FinalizationHintsForProver,
+            transcript::GoldilocksPoisedon2Transcript,
+            verifier::VerificationKey as CryptoVerificationKey,
+        },
+        field::goldilocks::{GoldilocksExt2, GoldilocksField},
+    },
+    ProverServiceDataKey,
+};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+
+// prover types
+pub type DefaultTranscript = GoldilocksPoisedon2Transcript;
+pub type DefaultTreeHasher = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+
+type F = GoldilocksField;
+type H = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+type Ext = GoldilocksExt2;
+pub type Proof = CryptoProof<F, H, Ext>;
+pub type VerificationKey = CryptoVerificationKey<F, H>;
+
+// cache types
+pub type SetupDataCache = HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>;
+pub type FinalizationHintsCache = HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>;
diff --git a/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs
new file mode 100644
index 000000000000..cb2d2a256df9
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs
@@ -0,0 +1,345 @@
+use std::{collections::HashMap, sync::Arc, time::Instant};
+
+use anyhow::Context;
+use tokio::{sync::mpsc::Sender, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use
zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + cs::implementations::setup::FinalizationHintsForProver, + field::goldilocks::GoldilocksField, + gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness, + }, + circuit_definitions::base_layer::ZkSyncBaseLayerCircuit, + }, + get_current_pod_name, + keys::RamPermutationQueueWitnessKey, + CircuitAuxData, CircuitWrapper, ProverJob, ProverServiceDataKey, RamPermutationQueueWitness, + WitnessVectorArtifactsTemp, +}; +use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber}; +use zksync_utils::panic_extractor::try_extract_panic_message; + +use crate::{metrics::WITNESS_VECTOR_GENERATOR_METRICS, Backoff, FinalizationHintsCache}; + +/// In charge of generating Witness Vectors and sending them to Circuit Prover. +/// Both job runner & job executor. +#[derive(Debug)] +pub struct WitnessVectorGenerator { + object_store: Arc, + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + /// Finalization Hints used for Witness Vector generation + finalization_hints_cache: FinalizationHintsCache, + /// Witness Vector sender for Circuit Prover + sender: Sender, + pod_name: String, +} + +impl WitnessVectorGenerator { + pub fn new( + object_store: Arc, + connection_pool: ConnectionPool, + protocol_version: ProtocolSemanticVersion, + sender: Sender, + finalization_hints: HashMap>, + ) -> Self { + Self { + object_store, + connection_pool, + protocol_version, + finalization_hints_cache: finalization_hints, + sender, + pod_name: get_current_pod_name(), + } + } + + /// Continuously polls database for new prover jobs and generates witness vectors for them. + /// All job executions are persisted. + pub async fn run( + self, + cancellation_token: CancellationToken, + mut backoff: Backoff, + ) -> anyhow::Result<()> { + let mut get_job_timer = Instant::now(); + while !cancellation_token.is_cancelled() { + if let Some(prover_job) = self + .get_job() + .await + .context("failed to get next witness generation job")? + { + tracing::info!( + "Witness Vector Generator received job {:?} after: {:?}", + prover_job.job_id, + get_job_timer.elapsed() + ); + WITNESS_VECTOR_GENERATOR_METRICS + .job_wait_time + .observe(get_job_timer.elapsed()); + if let e @ Err(_) = self.generate(prover_job, cancellation_token.clone()).await { + // this means that the witness vector receiver is closed, no need to report the error, just return + if cancellation_token.is_cancelled() { + return Ok(()); + } + e.context("failed to generate witness")? + } + + // waiting for a job timer starts as soon as the other is finished + get_job_timer = Instant::now(); + backoff.reset(); + continue; + }; + self.backoff(&mut backoff, cancellation_token.clone()).await; + } + tracing::info!("Witness Vector Generator shut down."); + Ok(()) + } + + /// Retrieves a prover job from database, loads artifacts from object store and hydrates them. 
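+    /// "Hydration" means reconstructing the full witness for `BasePartial` circuits: the RAM
+    /// permutation queue witnesses are stored as separate object store entries and are merged
+    /// back into the circuit by `fill_witness` below.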
+ async fn get_job(&self) -> anyhow::Result> { + let mut connection = self + .connection_pool + .connection() + .await + .context("failed to get db connection")?; + let prover_job_metadata = match connection + .fri_prover_jobs_dal() + .get_job(self.protocol_version, &self.pod_name) + .await + { + None => return Ok(None), + Some(job) => job, + }; + + let time = Instant::now(); + let circuit_wrapper = self + .object_store + .get(prover_job_metadata.into()) + .await + .context("failed to get circuit_wrapper from object store")?; + let artifact = match circuit_wrapper { + a @ CircuitWrapper::Base(_) => a, + a @ CircuitWrapper::Recursive(_) => a, + CircuitWrapper::BasePartial((circuit, aux_data)) => self + .fill_witness(circuit, aux_data, prover_job_metadata.block_number) + .await + .context("failed to fill witness")?, + }; + WITNESS_VECTOR_GENERATOR_METRICS + .artifact_download_time + .observe(time.elapsed()); + + let setup_data_key = ProverServiceDataKey { + circuit_id: prover_job_metadata.circuit_id, + round: prover_job_metadata.aggregation_round, + } + .crypto_setup_key(); + let prover_job = ProverJob::new( + prover_job_metadata.block_number, + prover_job_metadata.id, + artifact, + setup_data_key, + ); + Ok(Some(prover_job)) + } + + /// Prover artifact hydration. + async fn fill_witness( + &self, + circuit: ZkSyncBaseLayerCircuit, + aux_data: CircuitAuxData, + l1_batch_number: L1BatchNumber, + ) -> anyhow::Result { + if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit { + let sorted_witness_key = RamPermutationQueueWitnessKey { + block_number: l1_batch_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: true, + }; + let sorted_witness: RamPermutationQueueWitness = self + .object_store + .get(sorted_witness_key) + .await + .context("failed to load sorted witness key")?; + + let unsorted_witness_key = RamPermutationQueueWitnessKey { + block_number: l1_batch_number, + circuit_subsequence_number: aux_data.circuit_subsequence_number as usize, + is_sorted: false, + }; + let unsorted_witness: RamPermutationQueueWitness = self + .object_store + .get(unsorted_witness_key) + .await + .context("failed to load unsorted witness key")?; + + let mut witness = circuit_instance.witness.take().unwrap(); + witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: unsorted_witness.witness.into(), + }; + witness.sorted_queue_witness = FullStateCircuitQueueRawWitness { + elements: sorted_witness.witness.into(), + }; + circuit_instance.witness.store(Some(witness)); + + return Ok(CircuitWrapper::Base( + ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance), + )); + } + Err(anyhow::anyhow!( + "unexpected circuit received with partial witness, expected RAM permutation, got {:?}", + circuit.short_description() + )) + } + + /// Generates witness vector, with persistence of execution. + async fn generate( + &self, + prover_job: ProverJob, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + let start_time = Instant::now(); + let finalization_hints = self + .finalization_hints_cache + .get(&prover_job.setup_data_key) + .context(format!( + "failed to get finalization hints for key {:?}", + &prover_job.setup_data_key + ))? 
+            .clone();
+        let job_id = prover_job.job_id;
+        let task = tokio::task::spawn_blocking(move || {
+            let block_number = prover_job.block_number;
+            let _span = tracing::info_span!("witness_vector_generator", %block_number).entered();
+            Self::generate_witness_vector(prover_job, finalization_hints)
+        });
+
+        self.finish_task(job_id, start_time, task, cancellation_token.clone())
+            .await?;
+
+        tracing::info!(
+            "Witness Vector Generator finished job {:?} in: {:?}",
+            job_id,
+            start_time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .job_finished_time
+            .observe(start_time.elapsed());
+        Ok(())
+    }
+
+    /// Generates witness vector using crypto primitives.
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = % prover_job.block_number)
+    )]
+    pub fn generate_witness_vector(
+        prover_job: ProverJob,
+        finalization_hints: Arc<FinalizationHintsForProver>,
+    ) -> anyhow::Result<WitnessVectorArtifactsTemp> {
+        let time = Instant::now();
+        let cs = match prover_job.circuit_wrapper.clone() {
+            CircuitWrapper::Base(base_circuit) => {
+                base_circuit.synthesis::<GoldilocksField>(&finalization_hints)
+            }
+            CircuitWrapper::Recursive(recursive_circuit) => {
+                recursive_circuit.synthesis::<GoldilocksField>(&finalization_hints)
+            }
+            // circuit must be hydrated during `get_job`
+            CircuitWrapper::BasePartial(_) => {
+                return Err(anyhow::anyhow!("received unexpected dehydrated proof"));
+            }
+        };
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .crypto_primitive_time
+            .observe(time.elapsed());
+        Ok(WitnessVectorArtifactsTemp::new(
+            cs.witness.unwrap(),
+            prover_job,
+            time,
+        ))
+    }
+
+    /// Runs task to completion and persists result.
+    /// NOTE: Task may be cancelled mid-flight.
+    async fn finish_task(
+        &self,
+        job_id: u32,
+        time: Instant,
+        task: JoinHandle<anyhow::Result<WitnessVectorArtifactsTemp>>,
+        cancellation_token: CancellationToken,
+    ) -> anyhow::Result<()> {
+        tokio::select! {
+            _ = cancellation_token.cancelled() => {
+                tracing::info!("Stop signal received, shutting down Witness Vector Generator...");
+                return Ok(())
+            }
+            result = task => {
+                let error_message = match result {
+                    Ok(Ok(witness_vector)) => {
+                        tracing::info!("Witness Vector Generator executed job {:?} in: {:?}", job_id, time.elapsed());
+                        WITNESS_VECTOR_GENERATOR_METRICS.execution_time.observe(time.elapsed());
+                        self
+                            .save_result(witness_vector, job_id)
+                            .await
+                            .context("failed to save result")?;
+                        return Ok(())
+                    }
+                    Ok(Err(error)) => error.to_string(),
+                    Err(error) => try_extract_panic_message(error),
+                };
+                tracing::error!("Witness Vector Generator failed on job {job_id:?} with error {error_message:?}");
+
+                self.save_failure(job_id, error_message).await.context("failed to save failure")?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Sends the witness vector to Circuit Prover.
+    async fn save_result(
+        &self,
+        artifacts: WitnessVectorArtifactsTemp,
+        job_id: u32,
+    ) -> anyhow::Result<()> {
+        let time = Instant::now();
+        self.sender
+            .send(artifacts)
+            .await
+            .context("failed to send witness vector to prover")?;
+        tracing::info!(
+            "Witness Vector Generator sent job {:?} after {:?}",
+            job_id,
+            time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .send_time
+            .observe(time.elapsed());
+        Ok(())
+    }
+
+    /// Persists job execution error to database.
+    async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> {
+        self.connection_pool
+            .connection()
+            .await
+            .context("failed to get db connection")?
+            .fri_prover_jobs_dal()
+            .save_proof_error(job_id, error)
+            .await;
+        Ok(())
+    }
+
+    /// Backs off, whilst being cancellation aware.
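+    /// The sleep is raced against the cancellation token, so a pending backoff never delays shutdown.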
+ async fn backoff(&self, backoff: &mut Backoff, cancellation_token: CancellationToken) { + let backoff_duration = backoff.delay(); + tracing::info!("Backing off for {:?}...", backoff_duration); + // Error here corresponds to a timeout w/o receiving task cancel; we're OK with this. + tokio::time::timeout(backoff_duration, cancellation_token.cancelled()) + .await + .ok(); + } +} diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs index bbfb1d5a8322..5e8740d1b728 100644 --- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs +++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs @@ -90,7 +90,7 @@ impl Prover { let started_at = Instant::now(); let artifact: GoldilocksProverSetupData = self .keystore - .load_cpu_setup_data_for_circuit_type(key.clone()) + .load_cpu_setup_data_for_circuit_type(key) .context("get_cpu_setup_data_for_circuit_type()")?; METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()] .observe(started_at.elapsed()); @@ -226,7 +226,7 @@ impl JobProcessor for Prover { _started_at: Instant, ) -> JoinHandle> { let config = Arc::clone(&self.config); - let setup_data = self.get_setup_data(job.setup_data_key.clone()); + let setup_data = self.get_setup_data(job.setup_data_key); tokio::task::spawn_blocking(move || { let block_number = job.block_number; let _span = tracing::info_span!("cpu_prove", %block_number).entered(); @@ -307,7 +307,7 @@ pub fn load_setup_data_cache( for prover_setup_metadata in prover_setup_metadata_list { let key = setup_metadata_to_setup_data_key(&prover_setup_metadata); let setup_data = keystore - .load_cpu_setup_data_for_circuit_type(key.clone()) + .load_cpu_setup_data_for_circuit_type(key) .context("get_cpu_setup_data_for_circuit_type()")?; cache.insert(key, Arc::new(setup_data)); } diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index 6695905c07e3..646dd8ffda78 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -70,7 +70,7 @@ impl WitnessVectorGenerator { keystore: &Keystore, ) -> anyhow::Result { let finalization_hints = keystore - .load_finalization_hints(job.setup_data_key.clone()) + .load_finalization_hints(job.setup_data_key) .context("get_finalization_hints()")?; let cs = match job.circuit_wrapper.clone() { CircuitWrapper::Base(base_circuit) => { diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml index 617030754f8b..4d9addc26bc0 100644 --- a/prover/crates/lib/keystore/Cargo.toml +++ b/prover/crates/lib/keystore/Cargo.toml @@ -27,6 +27,8 @@ once_cell.workspace = true md5.workspace = true sha3.workspace = true hex.workspace = true +tokio.workspace = true +futures = { workspace = true, features = ["compat"] } [features] default = [] diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 28ce989287cc..6225943e3cd7 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -1,7 +1,9 @@ use std::{ + collections::HashMap, fs::{self, File}, io::Read, path::{Path, PathBuf}, + sync::Arc, }; use anyhow::Context as _; @@ -14,7 +16,7 @@ use circuit_definitions::{ }, zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use 
zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_prover_fri_types::ProverServiceDataKey; @@ -24,6 +26,7 @@ use zksync_utils::env::Workspace; use crate::GoldilocksGpuProverSetupData; use crate::{GoldilocksProverSetupData, VkCommitments}; +#[derive(Debug, Clone, Copy)] pub enum ProverServiceDataType { VerificationKey, SetupData, @@ -209,7 +212,7 @@ impl Keystore { key: ProverServiceDataKey, hint: &FinalizationHintsForProver, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::FinalizationHints); + let filepath = self.get_file_path(key, ProverServiceDataType::FinalizationHints); tracing::info!("saving finalization hints for {:?} to: {:?}", key, filepath); let serialized = @@ -267,7 +270,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -286,7 +289,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -301,7 +304,7 @@ impl Keystore { } pub fn is_setup_data_present(&self, key: &ProverServiceDataKey) -> bool { - Path::new(&self.get_file_path(key.clone(), ProverServiceDataType::SetupData)).exists() + Path::new(&self.get_file_path(*key, ProverServiceDataType::SetupData)).exists() } pub fn save_setup_data_for_circuit_type( @@ -309,7 +312,7 @@ impl Keystore { key: ProverServiceDataKey, serialized_setup_data: &Vec, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); tracing::info!("saving {:?} setup data to: {:?}", key, filepath); std::fs::write(filepath.clone(), serialized_setup_data) .with_context(|| format!("Failed saving setup-data at path: {filepath:?}")) @@ -465,4 +468,49 @@ impl Keystore { pub fn save_commitments(&self, commitments: &VkCommitments) -> anyhow::Result<()> { Self::save_json_pretty(self.get_base_path().join("commitments.json"), &commitments) } + + /// Async loads mapping of all circuits to setup key, if successful + pub async fn load_all_setup_key_mapping( + &self, + ) -> anyhow::Result>> { + self.load_key_mapping(ProverServiceDataType::SetupData) + .await + } + + /// Async loads mapping of all circuits to finalization hints, if successful + pub async fn load_all_finalization_hints_mapping( + &self, + ) -> anyhow::Result>> { + self.load_key_mapping(ProverServiceDataType::FinalizationHints) + .await + } + + /// Async function that loads mapping from disk. + /// Whilst IO is not parallelizable, ser/de is. + async fn load_key_mapping( + &self, + data_type: ProverServiceDataType, + ) -> anyhow::Result>> { + let mut mapping: HashMap> = HashMap::new(); + + // Load each file in parallel. Note that FS access is not necessarily parallel, but + // deserialization is. For larger files, it makes a big difference. 
+ // Note: `collect` is important, because iterators are lazy, and otherwise we won't actually + // spawn threads. + let handles: Vec<_> = ProverServiceDataKey::all() + .into_iter() + .map(|key| { + let filepath = self.get_file_path(key, data_type); + tokio::task::spawn_blocking(move || { + let data = Self::load_bincode_from_file(filepath)?; + anyhow::Ok((key, Arc::new(data))) + }) + }) + .collect(); + for handle in futures::future::join_all(handles).await { + let (key, setup_data) = handle.context("future loading key panicked")??; + mapping.insert(key, setup_data); + } + Ok(mapping) + } } diff --git a/prover/crates/lib/keystore/src/setup_data_generator.rs b/prover/crates/lib/keystore/src/setup_data_generator.rs index e69184ee9364..c4790d67feaa 100644 --- a/prover/crates/lib/keystore/src/setup_data_generator.rs +++ b/prover/crates/lib/keystore/src/setup_data_generator.rs @@ -33,7 +33,7 @@ pub fn generate_setup_data_common( let (finalization, vk) = if circuit.is_base_layer() { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_base_layer_vk(circuit.circuit_id) .unwrap() @@ -41,7 +41,7 @@ pub fn generate_setup_data_common( ) } else { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_recursion_layer_vk(circuit.circuit_id) .unwrap() @@ -86,7 +86,7 @@ pub trait SetupDataGenerator { ); return Ok("Skipped".to_string()); } - let serialized = self.generate_setup_data(circuit.clone())?; + let serialized = self.generate_setup_data(circuit)?; let digest = md5::compute(&serialized); if !dry_run { @@ -109,7 +109,7 @@ pub trait SetupDataGenerator { .iter() .map(|circuit| { let digest = self - .generate_and_write_setup_data(circuit.clone(), dry_run, recompute_if_missing) + .generate_and_write_setup_data(*circuit, dry_run, recompute_if_missing) .context(circuit.name()) .unwrap(); (circuit.name(), digest) diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json new file mode 100644 index 000000000000..140b8f126750 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json @@ -0,0 +1,60 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "aggregation_round", + "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "sequence_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + 
"name": "depth", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_node_final_proof", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2" +} diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 1b6c43f4c177..1a3b8de0ce4b 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -51,6 +51,78 @@ impl FriProverDal<'_, '_> { drop(latency); } + /// Retrieves the next prover job to be proven. Called by WVGs. + /// + /// Prover jobs must be thought of as ordered. + /// Prover must prioritize proving such jobs that will make the chain move forward the fastest. + /// Current ordering: + /// - pick the lowest batch + /// - within the lowest batch, look at the lowest aggregation level (move up the proof tree) + /// - pick the same type of circuit for as long as possible, this maximizes GPU cache reuse + /// + /// NOTE: Most of this function is a duplicate of `get_next_job()`. Get next job will be deleted together with old prover. + pub async fn get_job( + &mut self, + protocol_version: ProtocolSemanticVersion, + picked_by: &str, + ) -> Option { + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = 'in_progress', + attempts = attempts + 1, + updated_at = NOW(), + processing_started_at = NOW(), + picked_by = $3 + WHERE + id = ( + SELECT + id + FROM + prover_jobs_fri + WHERE + status = 'queued' + AND protocol_version = $1 + AND protocol_version_patch = $2 + ORDER BY + l1_batch_number ASC, + aggregation_round ASC, + circuit_id ASC, + id ASC + LIMIT + 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + "#, + protocol_version.minor as i32, + protocol_version.patch.0 as i32, + picked_by, + ) + .fetch_optional(self.storage.conn()) + .await + .expect("failed to get prover job") + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(i32::from(row.aggregation_round)) + .unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) + } + pub async fn get_next_job( &mut self, protocol_version: ProtocolSemanticVersion, diff --git a/prover/crates/lib/prover_fri_types/src/keys.rs b/prover/crates/lib/prover_fri_types/src/keys.rs index 2948fc5f84ed..26aa679b4a94 100644 --- a/prover/crates/lib/prover_fri_types/src/keys.rs +++ b/prover/crates/lib/prover_fri_types/src/keys.rs @@ -1,6 +1,8 @@ //! Different key types for object store. -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::AggregationRound, prover_dal::FriProverJobMetadata, L1BatchNumber, +}; /// Storage key for a [AggregationWrapper`]. 
#[derive(Debug, Clone, Copy)]
@@ -27,6 +29,18 @@ pub struct FriCircuitKey {
     pub depth: u16,
 }
 
+impl From<FriProverJobMetadata> for FriCircuitKey {
+    fn from(prover_job_metadata: FriProverJobMetadata) -> Self {
+        FriCircuitKey {
+            block_number: prover_job_metadata.block_number,
+            sequence_number: prover_job_metadata.sequence_number,
+            circuit_id: prover_job_metadata.circuit_id,
+            aggregation_round: prover_job_metadata.aggregation_round,
+            depth: prover_job_metadata.depth,
+        }
+    }
+}
+
 /// Storage key for a [`ZkSyncCircuit`].
 #[derive(Debug, Clone, Copy)]
 pub struct CircuitKey<'a> {
diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs
index c14bc1905639..4a8a1b3e4064 100644
--- a/prover/crates/lib/prover_fri_types/src/lib.rs
+++ b/prover/crates/lib/prover_fri_types/src/lib.rs
@@ -1,4 +1,4 @@
-use std::env;
+use std::{env, time::Instant};
 
 pub use circuit_definitions;
 use circuit_definitions::{
@@ -66,7 +66,7 @@ impl StoredObject for CircuitWrapper {
     serialize_using_bincode!();
 }
 
-#[derive(Clone, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
 pub enum FriProofWrapper {
     Base(ZkSyncBaseLayerProof),
     Recursive(ZkSyncRecursionLayerProof),
@@ -98,6 +98,45 @@ impl WitnessVectorArtifacts {
     }
 }
 
+/// This structure exists for the transition period between the old prover and the new one.
+/// We want the two codebases to coexist without impacting each other.
+/// Once the old prover is deleted, this struct will be renamed to `WitnessVectorArtifacts`.
+pub struct WitnessVectorArtifactsTemp {
+    pub witness_vector: WitnessVec<GoldilocksField>,
+    pub prover_job: ProverJob,
+    pub time: Instant,
+}
+
+impl WitnessVectorArtifactsTemp {
+    pub fn new(
+        witness_vector: WitnessVec<GoldilocksField>,
+        prover_job: ProverJob,
+        time: Instant,
+    ) -> Self {
+        Self {
+            witness_vector,
+            prover_job,
+            time,
+        }
+    }
+}
+
+/// Data structure containing the proof generated by the circuit prover.
+#[derive(Debug)]
+pub struct ProverArtifacts {
+    pub block_number: L1BatchNumber,
+    pub proof_wrapper: FriProofWrapper,
+}
+
+impl ProverArtifacts {
+    pub fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self {
+        Self {
+            block_number,
+            proof_wrapper,
+        }
+    }
+}
+
 #[derive(Clone, serde::Serialize, serde::Deserialize)]
 pub struct ProverJob {
     pub block_number: L1BatchNumber,
@@ -122,12 +161,30 @@ impl ProverJob {
     }
 }
 
-#[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
+#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)]
 pub struct ProverServiceDataKey {
     pub circuit_id: u8,
     pub round: AggregationRound,
 }
 
+impl ProverServiceDataKey {
+    /// Returns the crypto version of the setup key.
+    ///
+    /// The setup key is overloaded in our system. On one hand, it is used as an identifier for figuring out which types of proofs are ready.
+    /// On the other hand, it is also a setup key from the prover's perspective.
+    /// The two overlap on all aggregation rounds except NodeAggregation.
+    /// There's only one node key, and it belongs to circuit 2.
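+    ///
+    /// For example, a key with `circuit_id: 7` and `round: NodeAggregation` maps to the key
+    /// with `circuit_id: 2` for the same round; keys for all other rounds are returned as is.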
+    pub fn crypto_setup_key(self) -> Self {
+        if let AggregationRound::NodeAggregation = self.round {
+            return Self {
+                circuit_id: 2,
+                round: self.round,
+            };
+        }
+        self
+    }
+}
+
 fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound {
     match circuit_type {
         circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => {
@@ -186,6 +243,12 @@ impl ProverServiceDataKey {
         }
     }
 
+    pub fn all() -> Vec<ProverServiceDataKey> {
+        let mut keys = Self::all_boojum();
+        keys.push(Self::snark());
+        keys
+    }
+
     pub fn is_base_layer(&self) -> bool {
         self.round == AggregationRound::BasicCircuits
     }

From d99704141716940eb4d2987640357a5768a51c9f Mon Sep 17 00:00:00 2001
From: Alex Ostrovski 
Date: Fri, 20 Sep 2024 15:42:19 +0300
Subject: [PATCH 116/116] refactor(vm): Refactor L1 batch params provider
 (#2891)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

## What ❔

Refactors `L1BatchParamsProvider`, in particular its construction.

## Why ❔

To provide a more intuitive DevEx.

## Checklist

- [x] PR title corresponds to the body of PR (we generate changelog entries from PRs).
- [x] Tests for the changes have been added / updated.
- [x] Documentation comments have been added / updated.
- [x] Code has been formatted via `zk fmt` and `zk lint`.
---
 core/lib/vm_executor/src/storage.rs           | 40 ++++++++++++-
 core/node/node_sync/src/external_io.rs        |  2 +-
 core/node/state_keeper/src/io/common/tests.rs | 58 ++++++-------------
 core/node/state_keeper/src/io/mempool.rs     | 28 +++------
 .../tee_verifier_input_producer/src/lib.rs    | 20 ++-----
 core/node/vm_runner/src/storage.rs            | 37 ++++--------
 6 files changed, 84 insertions(+), 101 deletions(-)

diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs
index e39748786a30..a2369820a5b4 100644
--- a/core/lib/vm_executor/src/storage.rs
+++ b/core/lib/vm_executor/src/storage.rs
@@ -89,7 +89,15 @@ pub struct L1BatchParamsProvider {
 }
 
 impl L1BatchParamsProvider {
-    pub fn new() -> Self {
+    /// Creates a new provider.
+    pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> {
+        let mut this = Self::uninitialized();
+        this.initialize(storage).await?;
+        Ok(this)
+    }
+
+    /// Creates an uninitialized provider. Before use, it must be [`initialize`](Self::initialize())d.
+    pub fn uninitialized() -> Self {
         Self { snapshot: None }
     }
 
@@ -323,4 +331,34 @@ impl L1BatchParamsProvider {
             chain_id,
         ))
     }
+
+    /// Combines [`Self::load_first_l2_block_in_batch()`] and [`Self::load_l1_batch_params()`]. Returns `Ok(None)`
+    /// iff the requested batch doesn't have any persisted blocks.
+    ///
+    /// Prefer using this method unless you need to manipulate / inspect the first block in the batch.
+    pub async fn load_l1_batch_env(
+        &self,
+        storage: &mut Connection<'_, Core>,
+        number: L1BatchNumber,
+        validation_computational_gas_limit: u32,
+        chain_id: L2ChainId,
+    ) -> anyhow::Result<Option<(SystemEnv, L1BatchEnv)>> {
+        let first_l2_block = self
+            .load_first_l2_block_in_batch(storage, number)
+            .await
+            .with_context(|| format!("failed loading first L2 block for L1 batch #{number}"))?;
+        let Some(first_l2_block) = first_l2_block else {
+            return Ok(None);
+        };
+
+        self.load_l1_batch_params(
+            storage,
+            &first_l2_block,
+            validation_computational_gas_limit,
+            chain_id,
+        )
+        .await
+        .with_context(|| format!("failed loading params for L1 batch #{number}"))
+        .map(Some)
+    }
 }
diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs
index b7b8930c4957..6075ff048bfd 100644
--- a/core/node/node_sync/src/external_io.rs
+++ b/core/node/node_sync/src/external_io.rs
@@ -49,7 +49,7 @@ impl ExternalIO {
         main_node_client: Box<dyn MainNodeClient>,
         chain_id: L2ChainId,
     ) -> anyhow::Result<Self> {
-        let l1_batch_params_provider = L1BatchParamsProvider::new();
+        let l1_batch_params_provider = L1BatchParamsProvider::uninitialized();
         Ok(Self {
             pool,
             l1_batch_params_provider,
diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs
index 9ea699234f8f..b2a24acb4956 100644
--- a/core/node/state_keeper/src/io/common/tests.rs
+++ b/core/node/state_keeper/src/io/common/tests.rs
@@ -103,8 +103,7 @@ async fn waiting_for_l1_batch_params_with_genesis() {
         .await
         .unwrap();
 
-    let mut provider = L1BatchParamsProvider::new();
-    provider.initialize(&mut storage).await.unwrap();
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
     let (hash, timestamp) = provider
         .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0))
         .await
@@ -143,8 +142,7 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() {
     let snapshot_recovery =
         prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await;
 
-    let mut provider = L1BatchParamsProvider::new();
-    provider.initialize(&mut storage).await.unwrap();
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
     let (hash, timestamp) = provider
         .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number)
         .await
@@ -192,8 +190,7 @@ async fn getting_first_l2_block_in_batch_with_genesis() {
         .await
         .unwrap();
 
-    let mut provider = L1BatchParamsProvider::new();
-    provider.initialize(&mut storage).await.unwrap();
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
     let mut batches_and_l2_blocks = HashMap::from([
         (L1BatchNumber(0), Ok(Some(L2BlockNumber(0)))),
         (L1BatchNumber(1), Ok(Some(L2BlockNumber(1)))),
@@ -264,8 +261,7 @@ async fn getting_first_l2_block_in_batch_after_snapshot_recovery() {
     let snapshot_recovery =
         prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await;
 
-    let mut provider = L1BatchParamsProvider::new();
-    provider.initialize(&mut storage).await.unwrap();
+    let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap();
     let mut batches_and_l2_blocks = HashMap::from([
         (L1BatchNumber(1), Err(())),
         (snapshot_recovery.l1_batch_number, Err(())),
@@ -321,24 +317,20 @@ async fn loading_pending_batch_with_genesis() {
     )
     .await;
 
-    let mut provider = L1BatchParamsProvider::new();
-    provider.initialize(&mut storage).await.unwrap();
-    let first_l2_block_in_batch = provider
-        .load_first_l2_block_in_batch(&mut storage, L1BatchNumber(1))
-        .await
-        .unwrap()
-        .expect("no first L2 block");
-
assert_eq!(first_l2_block_in_batch.number(), L2BlockNumber(1)); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + L1BatchNumber(1), u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); + + assert_eq!(l1_batch_env.first_l2_block.number, 1); + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -403,27 +395,17 @@ async fn loading_pending_batch_after_snapshot_recovery() { ) .await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); - let first_l2_block_in_batch = provider - .load_first_l2_block_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) - .await - .unwrap() - .expect("no first L2 block"); - assert_eq!( - first_l2_block_in_batch.number(), - snapshot_recovery.l2_block_number + 1 - ); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + snapshot_recovery.l1_batch_number + 1, u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -466,8 +448,7 @@ async fn getting_batch_version_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) .await @@ -506,8 +487,7 @@ async fn getting_batch_version_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) .await diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 5734977538bd..108283122bce 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -97,30 +97,18 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let pending_l2_block_header = self + let Some((system_env, l1_batch_env)) = self .l1_batch_params_provider - .load_first_l2_block_in_batch(&mut storage, cursor.l1_batch) - .await - .with_context(|| { - format!( - "failed loading first L2 block for L1 batch #{}", - cursor.l1_batch - ) - })?; - let Some(pending_l2_block_header) = pending_l2_block_header else { - return Ok((cursor, None)); - }; - - let (system_env, l1_batch_env) = self - .l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &pending_l2_block_header, + cursor.l1_batch, self.validation_computational_gas_limit, self.chain_id, ) - .await - .with_context(|| format!("failed loading params for L1 batch #{}", cursor.l1_batch))?; + .await? 
+ else { + return Ok((cursor, None)); + }; let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .with_context(|| { @@ -436,7 +424,7 @@ impl MempoolIO { l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - l1_batch_params_provider: L1BatchParamsProvider::new(), + l1_batch_params_provider: L1BatchParamsProvider::uninitialized(), fee_account, validation_computational_gas_limit: config.validation_computational_gas_limit, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit.into(), diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 08382903ad6d..8a99aa07ae51 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -77,34 +77,24 @@ impl TeeVerifierInputProducer { .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? .unwrap(); - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut connection) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) .await .context("failed initializing L1 batch params provider")?; - let first_miniblock_in_batch = l1_batch_params_provider - .load_first_l2_block_in_batch(&mut connection, l1_batch_number) - .await - .with_context(|| { - format!("failed loading first miniblock in L1 batch #{l1_batch_number}") - })? - .with_context(|| format!("no miniblocks persisted for L1 batch #{l1_batch_number}"))?; - // In the state keeper, this value is used to reject execution. // All batches have already been executed by State Keeper. // This means we don't want to reject any execution, therefore we're using MAX as an allow all. let validation_computational_gas_limit = u32::MAX; let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut connection, - &first_miniblock_in_batch, + l1_batch_number, validation_computational_gas_limit, l2_chain_id, ) - .await - .context("expected miniblock to be executed and sealed")?; + .await? 
+        .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?;
 
         let used_contract_hashes = l1_batch_header
             .used_contract_hashes
diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs
index cd746e4e1d97..2285455ba244 100644
--- a/core/node/vm_runner/src/storage.rs
+++ b/core/node/vm_runner/src/storage.rs
@@ -48,9 +48,8 @@ pub(crate) struct PostgresLoader {
 
 impl PostgresLoader {
     pub async fn new(pool: ConnectionPool<Core>, chain_id: L2ChainId) -> anyhow::Result<Self> {
-        let mut l1_batch_params_provider = L1BatchParamsProvider::new();
         let mut conn = pool.connection_tagged("vm_runner").await?;
-        l1_batch_params_provider.initialize(&mut conn).await?;
+        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn).await?;
         Ok(Self {
             pool,
             l1_batch_params_provider,
@@ -151,12 +150,11 @@ impl VmRunnerStorage {
         chain_id: L2ChainId,
     ) -> anyhow::Result<(Self, StorageSyncTask)> {
         let mut conn = pool.connection_tagged(io.name()).await?;
-        let mut l1_batch_params_provider = L1BatchParamsProvider::new();
-        l1_batch_params_provider
-            .initialize(&mut conn)
+        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn)
             .await
             .context("Failed initializing L1 batch params provider")?;
         drop(conn);
+
         let state = Arc::new(RwLock::new(State {
             rocksdb: None,
             l1_batch_number: L1BatchNumber(0),
@@ -263,9 +261,7 @@ impl StorageSyncTask {
         state: Arc<RwLock<State>>,
     ) -> anyhow::Result<Self> {
         let mut conn = pool.connection_tagged(io.name()).await?;
-        let mut l1_batch_params_provider = L1BatchParamsProvider::new();
-        l1_batch_params_provider
-            .initialize(&mut conn)
+        let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn)
            .await
            .context("Failed initializing L1 batch params provider")?;
         let target_l1_batch_number = io.latest_processed_batch(&mut conn).await?;
@@ -398,29 +394,20 @@ pub(crate) async fn load_batch_execute_data(
     l1_batch_params_provider: &L1BatchParamsProvider,
     chain_id: L2ChainId,
 ) -> anyhow::Result<Option<BatchExecuteData>> {
-    let first_l2_block_in_batch = l1_batch_params_provider
-        .load_first_l2_block_in_batch(conn, l1_batch_number)
-        .await
-        .with_context(|| {
-            format!(
-                "Failed loading first L2 block for L1 batch #{}",
-                l1_batch_number
-            )
-        })?;
-    let Some(first_l2_block_in_batch) = first_l2_block_in_batch else {
-        return Ok(None);
-    };
-
-    let (system_env, l1_batch_env) = l1_batch_params_provider
-        .load_l1_batch_params(
+    let Some((system_env, l1_batch_env)) = l1_batch_params_provider
+        .load_l1_batch_env(
             conn,
-            &first_l2_block_in_batch,
+            l1_batch_number,
             // `validation_computational_gas_limit` is only relevant when rejecting txs, but we
             // are re-executing so none of them should be rejected
             u32::MAX,
             chain_id,
         )
-        .await
-        .with_context(|| format!("Failed loading params for L1 batch #{}", l1_batch_number))?;
+        .await?
+    else {
+        return Ok(None);
+    };
+
     let l2_blocks = conn
         .transactions_dal()
         .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number)
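For reference, the refactor collapses the old construct-then-initialize-then-load sequence into two calls at every call site. A minimal sketch of the resulting usage, assuming an already-built connection pool; the function name `batch_env_demo` and the exact import paths are illustrative, not part of the patch:

```rust
use anyhow::Context as _;
use zksync_dal::{ConnectionPool, Core};
use zksync_types::{L1BatchNumber, L2ChainId};
use zksync_vm_executor::storage::L1BatchParamsProvider;

async fn batch_env_demo(pool: ConnectionPool<Core>) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    // One fallible constructor replaces the old `new()` + `initialize()` pair.
    let provider = L1BatchParamsProvider::new(&mut conn)
        .await
        .context("failed initializing L1 batch params provider")?;

    // One call replaces `load_first_l2_block_in_batch()` + `load_l1_batch_params()`;
    // `None` means the batch has no persisted L2 blocks yet.
    let env = provider
        .load_l1_batch_env(&mut conn, L1BatchNumber(1), u32::MAX, L2ChainId::default())
        .await?;
    if let Some((_system_env, l1_batch_env)) = env {
        println!("first L2 block: {}", l1_batch_env.first_l2_block.number);
    }
    Ok(())
}
```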