diff --git a/.github/actions/buildah-action/action.yaml b/.github/actions/buildah-action/action.yaml
new file mode 100644
index 000000000..f564279ed
--- /dev/null
+++ b/.github/actions/buildah-action/action.yaml
@@ -0,0 +1,85 @@
+name: 'Buildah OCI Image'
+description: 'A composite action to build OCI images using Buildah'
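+# Example usage (values here are hypothetical, a minimal sketch of how a
+# workflow is expected to call this action):
+#
+#   - uses: ./.github/actions/buildah-action
+#     with:
+#       image_name: my-repo/my-image:abcd1234
+#       containerfile: ./Dockerfile
+#       registry: docker.io
+#       push: 'true'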
+
+inputs:
+  image_name:
+    description: 'The name of the image to build'
+    required: true
+  containerfile:
+    description: 'Path to the Containerfile'
+    required: true
+  context:
+    description: 'Build context directory'
+    default: '.'
+  volume:
+    description: 'Optional volume bind mount'
+    required: false
+  username:
+    description: 'Registry username'
+    required: false
+  password:
+    description: 'Registry password'
+    required: false
+  registry:
+    description: 'Registry URL'
+    required: false
+  build_args:
+    description: 'Optional build arguments for Buildah'
+    required: false
+  push:
+    description: 'Push the image to the registry'
+    required: false
+  tags:
+    description: 'Image tags'
+    required: false
+
+runs:
+  using: "composite"
+  steps:
+    - name: Setup environment
+      shell: bash
+      run: |
+        # Install buildah only when it is missing. GitHub's bash steps run
+        # with `-e`, so probing with a plain failing command would abort the
+        # step before the check could run.
+        if ! command -v buildah >/dev/null 2>&1; then
+          sudo apt-get update
+          sudo apt-get install -y buildah
+        fi
+    - name: Build the image
+      shell: bash
+      run: |
+        VOLUME_OPTION=""
+        if [ -n "${{ inputs.volume }}" ]; then
+          VOLUME_OPTION="--volume ${{ inputs.volume }}"
+        fi
+
+        BUILD_ARGS_OPTION=""
+        if [ -n "${{ inputs.build_args }}" ]; then
+          while IFS= read -r line; do
+            if [ -n "$line" ]; then
+              BUILD_ARGS_OPTION="$BUILD_ARGS_OPTION --build-arg $line"
+            fi
+          done <<< "${{ inputs.build_args }}"
+        fi
+
+        TAG_OPTIONS=""
+        if [ -n "${{ inputs.tags }}" ]; then
+          IFS=',' read -ra TAG_ARRAY <<< "${{ inputs.tags }}"
+          for TAG in "${TAG_ARRAY[@]}"; do
+            TAG_OPTIONS="$TAG_OPTIONS -t ${{ inputs.image_name }}:$TAG"
+          done
+        fi
+
+        buildah bud $VOLUME_OPTION $BUILD_ARGS_OPTION $TAG_OPTIONS --format oci -f ${{ inputs.containerfile }} -t ${{ inputs.image_name }} ${{ inputs.context }}
+
+    - name: Login to registry
+      shell: bash
+      if: ${{ inputs.push == 'true' && inputs.registry && inputs.username && inputs.password }}
+      run: |
+        echo "${{ inputs.password }}" | buildah login -u ${{ inputs.username }} --password-stdin ${{ inputs.registry }}
+
+    - name: Push the image
+      shell: bash
+      if: ${{ inputs.push == 'true' }}
+      run: |
+        buildah push ${{ inputs.image_name }} ${{ inputs.registry }}/${{ inputs.image_name }}
diff --git a/.github/actions/cargo-command/action.yaml b/.github/actions/cargo-command/action.yaml
index c6ed7287c..85f4ffb7b 100644
--- a/.github/actions/cargo-command/action.yaml
+++ b/.github/actions/cargo-command/action.yaml
@@ -6,18 +6,26 @@ inputs:
     required: false
     default: 'build'
   package:
-    description: 'Limit execution to a specific package'
+    description: 'Limit execution to a specific package, assumes workspace if unset'
     required: false
   profile:
     description: 'Profile under which to run cargo command'
     required: false
     default: 'release'
-  features:
+  feature:
     description: 'Feature with which to run cargo command'
     required: false
   args:
     description: 'Additional argument to pass to cargo invocation'
     required: false
+  cache:
+    description: 'Whether to enable registry, index and compile output caching'
+    required: false
+    default: true
+  annotate:
+    description: 'Whether to provide errors as GitHub annotations'
+    required: false
+    default: true
 runs:
   using: "composite"
   steps:
@@ -25,31 +33,34 @@
       shell: bash
       run: rustup show
-    - name: Install cargo-cache
+    - name: Install cargo-action-fmt
+      if: ${{ fromJSON(inputs.annotate) }}
       shell: bash
       run: cargo install cargo-action-fmt
    - name: Cache cargo registry and index
+      if: ${{ fromJSON(inputs.cache) }}
       uses: actions/cache@v4
       with:
         path: |
           ~/.cargo/registry/index/
           ~/.cargo/registry/cache/
           ~/.cargo/git/db/
-        key: cargo-cache-${{ inputs.package || 'workspace' }}-${{ inputs.features || 'default' }}-${{ hashFiles('Cargo.lock') }}
+        key: cargo-cache-${{ inputs.package || 'workspace' }}-${{ inputs.feature || 'default' }}-${{ hashFiles('Cargo.lock') }}
         restore-keys: |
-          cargo-cache-${{ inputs.package || 'workspace' }}-${{ inputs.features || 'default' }}-
+          cargo-cache-${{ inputs.package || 'workspace' }}-${{ inputs.feature || 'default' }}-
           cargo-cache-${{ inputs.package || 'workspace' }}-default-
           cargo-cache-workspace-
    - name: Cache cargo target folder
+      if: ${{ fromJSON(inputs.cache) }}
       uses: actions/cache@v4
       with:
         path: target
-        key: cargo-${{ inputs.command }}-${{ inputs.profile }}-${{ inputs.package || 'workspace' }}-${{ inputs.features || 'default' }}-${{ hashFiles('Cargo.lock') }}
+        key: cargo-${{ inputs.command }}-${{ inputs.profile }}-${{ inputs.package || 'workspace' }}-${{ inputs.feature || 'default' }}-${{ hashFiles('Cargo.lock') }}
         restore-keys: |
-          cargo-${{ inputs.command }}-${{ inputs.profile }}-${{ inputs.package || 'workspace' }}-${{ inputs.features || 'default' }}-
+          cargo-${{ inputs.command }}-${{ inputs.profile }}-${{ inputs.package || 'workspace' }}-${{ inputs.feature || 'default' }}-
           cargo-${{ inputs.command }}-${{ inputs.profile }}-${{ inputs.package || 'workspace' }}-default-
           cargo-${{ inputs.command }}-${{ inputs.profile }}-workspace-
    - name: Run cargo ${{ inputs.command }}
       env:
         CARGO_TERM_COLOR: always
       shell: bash
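+      # A rough breakdown of the assembled invocation below: `--package` or
+      # `--workspace` is chosen from the package input, and when annotate is
+      # enabled the JSON build output is piped through cargo-action-fmt to
+      # surface errors as GitHub annotations.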
-      run: cargo ${{ inputs.command }} ${{ inputs.package != '' && '--package' || '' }} ${{ inputs.package }} --profile '${{ inputs.profile }}' --features '${{ inputs.features }}' --message-format json ${{ inputs.args }} | cargo-action-fmt
+      run: cargo ${{ inputs.command }} ${{ inputs.package && '--package' || '--workspace' }} ${{ inputs.package }} --profile '${{ inputs.profile }}' --features '${{ inputs.feature }}' ${{ fromJSON(inputs.annotate) && '--message-format json' || '' }} ${{ inputs.args }} ${{ fromJSON(inputs.annotate) && '| cargo-action-fmt' || '' }}
diff --git a/.github/workflows/merge-docker-chronicle.yaml b/.github/workflows/merge-docker-chronicle.yaml
index 8de7e081f..ecc18cac6 100644
--- a/.github/workflows/merge-docker-chronicle.yaml
+++ b/.github/workflows/merge-docker-chronicle.yaml
@@ -39,7 +39,7 @@ jobs:
   build-binary:
     name: Build Docker image
     needs: ["set-tags"]
-    runs-on: ubuntu-latest
+    runs-on: ["self-hosted", "container"]
     strategy:
       fail-fast: false
       matrix:
@@ -51,24 +51,26 @@ jobs:
     steps:
       - name: Fetch latest code
        uses: actions/checkout@v3
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
+      - name: Create target dir
+        run: mkdir -p ${{ github.workspace }}/target/${{ matrix.profile }}
+      - name: Cache Rust deps
+        uses: actions/cache@v3
         with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push
-        uses: docker/build-push-action@v4
+          path: ${{ github.workspace }}/target/${{ matrix.profile }}
+          key: docker-chronicle-${{ matrix.profile }}-${{ hashFiles('Cargo.lock', 'config/docker/Dockerfile.chronicle-release') }}
+      - name: Build OCI Image
+        uses: ./.github/actions/buildah-action
         with:
+          image_name: ${{ env.DOCKER_REPO }}-${{ matrix.image }}:${{ needs.set-tags.outputs.commit_hash8 }}
+          containerfile: ./config/docker/Dockerfile.chronicle-release
           context: .
-          push: true
-          tags: |
-            ${{ env.DOCKER_REPO }}-${{ matrix.image }}:${{ needs.set-tags.outputs.commit_hash8 }}
-            ${{ env.DOCKER_REPO }}:latest
-          file: config/docker/Dockerfile.chronicle-release
-          build-args: |-
+          volume: ${{ github.workspace }}/target/${{ matrix.profile }}:/build/target/${{ matrix.profile }}
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          registry: docker.io
+          push: 'true'
+          build_args: |
             VCS_REF=${{ needs.set-tags.outputs.commit_hash8 }}
             PROFILE=${{ matrix.profile }}
+
+
diff --git a/.github/workflows/merge-docker-tester.yaml b/.github/workflows/merge-docker-tester.yaml
index c29f22114..697b02f87 100644
--- a/.github/workflows/merge-docker-tester.yaml
+++ b/.github/workflows/merge-docker-tester.yaml
@@ -44,15 +44,6 @@ jobs:
         uses: actions/checkout@v3
         with:
           submodules: recursive
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
-        with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Setup Cargo
         uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
@@ -68,11 +59,14 @@
       - name: Copy contracts
        run: cp -r target/release/tester tester-bin
       - name: Build and push
-        uses: docker/build-push-action@v4
+        uses: ./.github/actions/buildah-action
         with:
+          image_name: ${{ env.DOCKER_REPO }}:${{ needs.set-tags.outputs.commit_hash8 }}
+          containerfile: ./config/docker/Dockerfile.tester-release
           context: .
-          push: true
-          tags: |
-            ${{ env.DOCKER_REPO }}:${{ needs.set-tags.outputs.commit_hash8 }}
-            ${{ env.DOCKER_REPO }}:latest
-          file: config/docker/Dockerfile.tester-release
+          volume: ${{ github.workspace }}/target:/build/target
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          registry: docker.io
+          push: 'true'
+
diff --git a/.github/workflows/merge-docker-timenode.yaml b/.github/workflows/merge-docker-timenode.yaml
index aef7b5f51..116f34b4b 100644
--- a/.github/workflows/merge-docker-timenode.yaml
+++ b/.github/workflows/merge-docker-timenode.yaml
@@ -28,7 +28,7 @@ jobs:
   build-binary:
     name: Build Docker image
     needs: ["set-tags"]
-    runs-on: ubuntu-latest
+    runs-on: ["self-hosted", "container"]
     strategy:
       fail-fast: false
      matrix:
@@ -45,24 +45,26 @@ jobs:
     steps:
       - name: Fetch latest code
         uses: actions/checkout@v3
-      - name: Set up QEMU
-        uses: docker/setup-qemu-action@v2
-      - name: Set up Docker Buildx
-        uses: docker/setup-buildx-action@v2
-      - name: Login to Docker Hub
-        uses: docker/login-action@v2
+      - name: Create target dir
+        run: mkdir -p ${{ github.workspace }}/target/${{ matrix.profile }}
+      - name: Cache Rust deps
+        uses: actions/cache@v3
         with:
-          username: ${{ secrets.DOCKERHUB_USERNAME }}
-          password: ${{ secrets.DOCKERHUB_TOKEN }}
-      - name: Build and push
-        uses: docker/build-push-action@v4
+          path: ${{ github.workspace }}/target/${{ matrix.profile }}
+          key: docker-timechain-${{ matrix.profile }}-${{ hashFiles('Cargo.lock', 'config/docker/Dockerfile.timenode-release') }}
+      - name: Build OCI Image
+        uses: ./.github/actions/buildah-action
         with:
+          image_name: ${{ env.DOCKER_REPO }}-${{ matrix.image }}:${{ needs.set-tags.outputs.commit_hash8 }}
+          containerfile: ./config/docker/Dockerfile.timenode-release
           context: .
-          push: true
-          tags: ${{ env.DOCKER_REPO }}-${{ matrix.image }}:${{ needs.set-tags.outputs.commit_hash8 }}
-          file: config/docker/Dockerfile.release
-          build-args: |
-            VCS_REF=${{ needs.set-tags.outputs.commit_hash8 }}
+          volume: ${{ github.workspace }}/target/${{ matrix.profile }}:/build/target/${{ matrix.profile }}
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+          registry: docker.io
+          push: 'true'
+          build_args: |
             PROFILE=${{ matrix.profile }}
-            FEATURES=${{ matrix.features }}
+            VCS_REF=${{ needs.set-tags.outputs.commit_hash8 }}
             BUILD_VARIANT=${{ matrix.image }}
+            FEATURES=${{ matrix.features }}
diff --git a/.github/workflows/merge-pages-docs.yaml b/.github/workflows/merge-pages-docs.yaml
index 9c3f9ee2e..0c4b8246e 100644
--- a/.github/workflows/merge-pages-docs.yaml
+++ b/.github/workflows/merge-pages-docs.yaml
@@ -36,7 +36,7 @@ jobs:
         uses: ./.github/actions/cargo-command
         with:
           command: doc
-          args: --workspace --no-deps --document-private-items
+          args: --no-deps --document-private-items
       - name: Assemble structure
         env:
           DOCS_HIDEOUT: an8ohgahmoot6ro8ieReib9micau0Oow
diff --git a/.github/workflows/pr-build-node.yaml b/.github/workflows/pr-build-node.yaml
deleted file mode 100644
index f6ed0fc01..000000000
--- a/.github/workflows/pr-build-node.yaml
+++ /dev/null
@@ -1,46 +0,0 @@
-name: Compile node
-on:
-  pull_request:
-    paths:
-      - '.github/actions/cargo-command/**'
-      - '.github/workflows/pr-build-node.yaml'
-      - 'node/**'
-      - 'primitives/**'
-      - 'Cargo.toml'
-      - 'Cargo.lock'
-      - 'rust-toolchain.toml'
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-jobs:
-  build-node:
-    runs-on: [self-hosted, general]
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - profile: production
-            features: default
-          - profile: testnet
-            features: default
-          - profile: testnet
-            features: development
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Build timenode
-        uses: ./.github/actions/cargo-command
-        env:
-          SKIP_WASM_BUILD: true
-        with:
-          package: timechain-node
-          profile: ${{ matrix.profile }}
-          features: ${{ matrix.features }}
-      - name: Upload timechain node
-        uses: actions/upload-artifact@v4
-        with:
-          name: timenode.${{ matrix.profile }}.${{ matrix.features }}
-          if-no-files-found: error
-          path: target/${{ matrix.profile }}/timechain-node
diff --git a/.github/workflows/pr-build-tester.yaml b/.github/workflows/pr-build-tester.yaml
deleted file mode 100644
index 0a0219e77..000000000
--- a/.github/workflows/pr-build-tester.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
-name: Compile tester
-on:
-  pull_request:
-    paths:
-      - '.github/actions/cargo-command/**'
-      - '.github/workflows/pr-build-tester.yaml'
-      - 'analog-gmp/**'
-      - 'config/subxt/**'
-      - 'primitives/**'
-      - 'tc-subxt/**'
-      - 'tester/**'
-      - 'Cargo.toml'
-      - 'Cargo.lock'
-      - 'rust-toolchain.toml'
-concurrency:
-  group: ${{ github.workflow }}-${{ github.ref }}
-  cancel-in-progress: true
-jobs:
-  build-tester:
-    runs-on: [self-hosted, general]
-    strategy:
-      fail-fast: false
-      matrix:
-        profile:
-          - production
-          - testnet
-    steps:
-      - name: Checkout sources
-        uses: actions/checkout@v4
-        with:
-          submodules: recursive
-      - name: Build tester
-        uses: ./.github/actions/cargo-command
-        with:
-          package: tester
-          profile: ${{ matrix.profile }}
-      - name: Upload tester
-        uses: actions/upload-artifact@v4
-        with:
-          name: tester.${{ matrix.profile }}
-          if-no-files-found: error
-          path: target/${{ matrix.profile }}/tester
diff --git a/.github/workflows/pr-test-cargo.yaml b/.github/workflows/pr-test-cargo.yaml
index 6e6bce5f2..0f81b8f80 100644
--- a/.github/workflows/pr-test-cargo.yaml
+++ b/.github/workflows/pr-test-cargo.yaml
@@ -1,4 +1,4 @@
-name: Check cargo tests
+name: Check testsuite
 on:
   pull_request:
     paths:
@@ -24,26 +24,21 @@ concurrency:
 jobs:
   test-cargo:
     runs-on: [self-hosted, general]
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - command: clippy
-            args: --all-targets --workspace --examples --tests -- --no-deps -D warnings
-          - command: test
-            args: --workspace --locked
-          - command: check
-            features: runtime-benchmarks
-          - command: check
-            features: try-runtime
     steps:
       - name: Checkout sources
        uses: actions/checkout@v4
        with:
          submodules: recursive
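+      # Split into two invocations (a sketch of the intent): the first builds
+      # the test binaries with caching and annotations enabled, the second
+      # runs the prebuilt binaries with both disabled.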
-      - name: Run cargo command
+      - name: Build testsuite
         uses: ./.github/actions/cargo-command
         with:
-          command: ${{ matrix.command }}
-          features: ${{ matrix.features }}
-          args: ${{ matrix.args }}
+          command: test
+          feature: try-runtime
+          args: --no-run
+      - name: Run testsuite
+        uses: ./.github/actions/cargo-command
+        with:
+          command: test
+          feature: try-runtime
+          cache: false
+          annotate: false
diff --git a/.github/workflows/pr-build-chronicle.yaml b/.github/workflows/pr-test-clippy.yaml
similarity index 51%
rename from .github/workflows/pr-build-chronicle.yaml
rename to .github/workflows/pr-test-clippy.yaml
index 877efbe41..52dcb4dbe 100644
--- a/.github/workflows/pr-build-chronicle.yaml
+++ b/.github/workflows/pr-test-clippy.yaml
@@ -1,15 +1,20 @@
-name: Compile chronicle
+name: Check code linter
 on:
   pull_request:
     paths:
       - '.github/actions/cargo-command/**'
-      - '.github/workflows/pr-build-chronicle.yaml'
+      - '.github/workflows/pr-test-clippy.yaml'
       - 'chronicle/**'
       - 'config/subxt/**'
-      - 'lib/**'
+      - 'docs/**'
+      - 'node/**'
+      - 'pallets/**'
       - 'primitives/**'
+      - 'runtime/**'
       - 'tc-subxt/**'
+      - 'tester/**'
       - 'tss/**'
+      - 'utils/**'
       - 'Cargo.toml'
       - 'Cargo.lock'
       - 'rust-toolchain.toml'
@@ -17,27 +22,15 @@ concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 jobs:
-  build-chronicle:
+  test-clippy:
     runs-on: [self-hosted, general]
-    strategy:
-      fail-fast: false
-      matrix:
-        profile:
-          - production
-          - testnet
     steps:
       - name: Checkout sources
        uses: actions/checkout@v4
        with:
          submodules: recursive
-      - name: Build chronicle
+      - name: Run clippy linter
         uses: ./.github/actions/cargo-command
         with:
-          package: chronicle
-          profile: ${{ matrix.profile }}
-      - name: Upload chronicle
-        uses: actions/upload-artifact@v4
-        with:
-          name: chronicle.${{ matrix.profile }}
-          if-no-files-found: error
-          path: target/${{ matrix.profile }}/chronicle
+          command: clippy
+          args: --all-targets --all-features -- -D warnings
diff --git a/.github/workflows/pr-build-docs.yaml b/.github/workflows/pr-test-docs.yaml
similarity index 86%
rename from .github/workflows/pr-build-docs.yaml
rename to .github/workflows/pr-test-docs.yaml
index f96d8a218..077652e1c 100644
--- a/.github/workflows/pr-build-docs.yaml
+++ b/.github/workflows/pr-test-docs.yaml
@@ -1,9 +1,9 @@
-name: Generate docs
+name: Check docs generation
 on:
   pull_request:
     paths:
       - '.github/actions/cargo-command/**'
-      - '.github/workflows/pr-build-docs.yaml'
+      - '.github/workflows/pr-test-docs.yaml'
       - 'chronicle/**'
       - 'config/subxt/**'
       - 'docs/**'
@@ -30,7 +30,7 @@ jobs:
         uses: ./.github/actions/cargo-command
         with:
           command: doc
-          args: --workspace --no-deps --document-private-items
+          args: --no-deps --document-private-items
       - name: Upload docs
         uses: actions/upload-artifact@v4
         with:
diff --git a/.github/workflows/pr-test-rustfmt.yaml b/.github/workflows/pr-test-rustfmt.yaml
index 34ed6f77f..833c07436 100644
--- a/.github/workflows/pr-test-rustfmt.yaml
+++ b/.github/workflows/pr-test-rustfmt.yaml
@@ -28,4 +28,4 @@ jobs:
       - name: Install rust toolchain
         run: rustup show
       - name: Run cargo fmt
-        run: cargo fmt --all -- --check
+        run: cargo fmt --all --check
diff --git a/.github/workflows/pr-build-runtime.yaml b/.github/workflows/pr-update-metadata.yaml
similarity index 87%
rename from .github/workflows/pr-build-runtime.yaml
rename to .github/workflows/pr-update-metadata.yaml
index 92b259ef2..310dbf345 100644
--- a/.github/workflows/pr-build-runtime.yaml
+++ b/.github/workflows/pr-update-metadata.yaml
@@ -1,9 +1,9 @@
-name: Compile runtimes
+name: Update metadata
 on:
   pull_request:
     paths:
       - '.github/actions/cargo-command/**'
-      - '.github/workflows/pr-build-runtime.yaml'
+      - '.github/workflows/pr-update-metadata.yaml'
      - 'pallets/**'
      - 'primitives/**'
      - 'runtime/**'
@@ -21,10 +21,10 @@ jobs:
      matrix:
        include:
          - package: timechain-runtime
-            features: default
+            feature: default
             crate: timechain_runtime
           - package: timechain-runtime
-            features: development
+            feature: development
             crate: timechain_runtime
     steps:
       - name: Checkout sources
@@ -35,18 +35,17 @@ jobs:
         uses: ./.github/actions/cargo-command
         with:
           package: ${{ matrix.package }}
-          profile: release
-          features: ${{ matrix.features }}
+          feature: ${{ matrix.feature }}
       - name: Upload timechain runtime
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ matrix.package }}.${{ matrix.features }}.wasm
+          name: ${{ matrix.package }}.${{ matrix.feature }}.wasm
           if-no-files-found: error
           path: target/release/wbuild/${{ matrix.package }}/${{ matrix.crate }}.compact.compressed.wasm
       - name: Upload timechain metadata
         uses: actions/upload-artifact@v4
         with:
-          name: ${{ matrix.package }}.${{ matrix.features }}.scale
+          name: ${{ matrix.package }}.${{ matrix.feature }}.scale
           if-no-files-found: error
           path: target/release/wbuild/${{ matrix.package }}/${{ matrix.crate }}.metadata.scale
   update-metadata:
@@ -86,6 +85,6 @@ jobs:
           echo "metadata changes detected: committing updated metadata"
           git config user.email "github@analog.one"
           git config user.name "Metadata Update Bot"
-          git commit -am "tc-subxt: Automated metadata update"
+          git commit -am "tc-subxt: Automatic metadata update"
           git push
         fi
diff --git a/Cargo.lock b/Cargo.lock
index 333437c33..74a72704b 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -5893,7 +5893,7 @@ dependencies = [
  "httpdate",
  "itoa",
  "pin-project-lite 0.2.14",
- "socket2 0.5.7",
+ "socket2 0.4.10",
  "tokio",
  "tower-service",
  "tracing",
@@ -5985,7 +5985,7 @@ dependencies = [
  "iana-time-zone-haiku",
  "js-sys",
  "wasm-bindgen",
- "windows-core 0.52.0",
+ "windows-core 0.51.1",
 ]
 
 [[package]]
@@ -8653,7 +8653,7 @@
 version = "0.7.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b"
 dependencies = [
- "proc-macro-crate 3.1.0",
+ "proc-macro-crate 2.0.0",
  "proc-macro2",
  "quote",
  "syn 2.0.68",
@@ -12886,7 +12886,7 @@
 checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4"
 dependencies = [
  "bytes",
  "heck 0.5.0",
- "itertools 0.12.1",
+ "itertools 0.11.0",
  "log",
  "multimap 0.10.0",
  "once_cell",
@@ -12919,7 +12919,7 @@
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1"
 dependencies = [
  "anyhow",
- "itertools 0.12.1",
+ "itertools 0.11.0",
  "proc-macro2",
  "quote",
  "syn 2.0.68",
"quote", "syn 2.0.68", @@ -13756,7 +13756,7 @@ dependencies = [ [[package]] name = "rosetta-client" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "async-trait", @@ -13785,7 +13785,7 @@ dependencies = [ [[package]] name = "rosetta-config-astar" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "rosetta-core", @@ -13795,7 +13795,7 @@ dependencies = [ [[package]] name = "rosetta-config-ethereum" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "const-hex", @@ -13815,7 +13815,7 @@ dependencies = [ [[package]] name = "rosetta-config-polkadot" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "rosetta-core", @@ -13826,7 +13826,7 @@ dependencies = [ [[package]] name = "rosetta-core" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "async-trait", @@ -13842,7 +13842,7 @@ dependencies = [ [[package]] name = "rosetta-crypto" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "bech32", @@ -13868,7 +13868,7 @@ dependencies = [ [[package]] name = "rosetta-ethereum-backend" version = "0.1.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "async-trait", "auto_impl", @@ -13884,7 +13884,7 @@ dependencies = [ [[package]] name = "rosetta-ethereum-types" version = "0.2.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "bytes", "const-hex", @@ -13918,7 +13918,7 @@ dependencies = [ [[package]] name = "rosetta-server" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "async-trait", @@ -13944,7 +13944,7 @@ dependencies = [ [[package]] name = "rosetta-server-astar" version = "0.6.0" -source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd" +source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4" dependencies = [ "anyhow", "async-trait", @@ -13968,7 +13968,7 
@@ -13968,7 +13968,7 @@ dependencies = [
 [[package]]
 name = "rosetta-server-ethereum"
 version = "0.6.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -13995,7 +13995,7 @@ dependencies = [
 [[package]]
 name = "rosetta-server-polkadot"
 version = "0.6.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "anyhow",
  "async-trait",
@@ -14016,7 +14016,7 @@ dependencies = [
 [[package]]
 name = "rosetta-tx-ethereum"
 version = "0.6.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "anyhow",
  "rosetta-config-ethereum",
@@ -14027,7 +14027,7 @@ dependencies = [
 [[package]]
 name = "rosetta-tx-polkadot"
 version = "0.6.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "anyhow",
  "blake2-rfc",
@@ -14041,7 +14041,7 @@ dependencies = [
 [[package]]
 name = "rosetta-types"
 version = "0.6.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "serde",
  "serde_json",
@@ -14050,7 +14050,7 @@ dependencies = [
 [[package]]
 name = "rosetta-utils"
 version = "0.1.0"
-source = "git+https://github.com/analog-labs/chain-connectors#bca13f762bfd86175244551cd77991708e703bcd"
+source = "git+https://github.com/analog-labs/chain-connectors#901c7b1f2ca6d79f9689beb55cb7b3b6eb304ba4"
 dependencies = [
  "bytes",
  "futures-timer",
@@ -19937,7 +19937,6 @@ dependencies = [
  "async-channel 1.9.0",
  "async-trait",
  "bincode",
- "chronicle",
  "clap",
  "convert_case 0.6.0",
  "futures",
@@ -19947,7 +19946,6 @@ dependencies = [
  "polkadot-sdk",
  "serde",
  "serde_json",
- "tc-subxt",
  "time-primitives",
  "timechain-runtime",
  "tokio",
diff --git a/chronicle/Cargo.toml b/chronicle/Cargo.toml
index 38c9c3897..698edd07e 100644
--- a/chronicle/Cargo.toml
+++ b/chronicle/Cargo.toml
@@ -49,5 +49,3 @@ surf = { version = "2.3.2", default-features = false, features = [ "h1-client-ru
 
 [features]
 default = []
-testnet = [ "tc-subxt/testnet" ]
-development = [ "tc-subxt/development" ]
diff --git a/chronicle/src/mock.rs b/chronicle/src/mock.rs
index 432650b26..fc3aa23fa 100644
--- a/chronicle/src/mock.rs
+++ b/chronicle/src/mock.rs
@@ -13,7 +13,7 @@ use time_primitives::{
 	sp_core, AccountId, Balance, BlockHash, BlockNumber, ChainName, ChainNetwork, Commitment,
 	Function, MemberStatus, NetworkId, Payload, PeerId, ProofOfKnowledge, PublicKey, Runtime,
 	ShardId, ShardStatus, TaskDescriptor, TaskExecution, TaskId, TaskPhase, TaskResult, TssHash,
-	TssId, TssSignature, TssSigningRequest,
+	TssSignature, TssSigningRequest,
 };
 use tokio::time::Duration;
 use tss::{sum_commitments, VerifiableSecretSharingCommitment, VerifyingKey};
@@ -207,7 +207,7 @@ impl Mock {
 		if let Some(mut tss) = self.tss.clone() {
 			let (tx, rx) = oneshot::channel();
 			tss.send(TssSigningRequest {
-				request_id: TssId::new(task_id, task_phase),
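+				// TaskExecution (a task id plus its phase) now doubles as the
+				// TSS request id, replacing the removed TssId alias.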
+				request_id: TaskExecution::new(task_id, task_phase),
 				shard_id,
 				block_number,
 				data: payload.to_vec(),
diff --git a/chronicle/src/network/mod.rs b/chronicle/src/network/mod.rs
index c3c50e0b2..00b756fde 100644
--- a/chronicle/src/network/mod.rs
+++ b/chronicle/src/network/mod.rs
@@ -8,13 +8,13 @@ use std::ops::Deref;
 use std::path::PathBuf;
 use std::pin::Pin;
 use std::sync::Arc;
-use time_primitives::{BlockNumber, ShardId, TssId};
+use time_primitives::{BlockNumber, ShardId, TaskExecution};
 
 mod protocol;
 
 pub use time_primitives::PeerId;
 
-pub type TssMessage = tss::TssMessage<TssId>;
+pub type TssMessage = tss::TssMessage<TaskExecution>;
 
 pub const PROTOCOL_NAME: &str = "/analog-labs/chronicle/1";
diff --git a/chronicle/src/shards/service.rs b/chronicle/src/shards/service.rs
index 5734d4df3..a725bbfa3 100644
--- a/chronicle/src/shards/service.rs
+++ b/chronicle/src/shards/service.rs
@@ -17,7 +17,8 @@ use std::{
 	task::Poll,
 };
 use time_primitives::{
-	BlockHash, BlockNumber, Runtime, ShardId, ShardStatus, TssId, TssSignature, TssSigningRequest,
+	BlockHash, BlockNumber, Runtime, ShardId, ShardStatus, TaskExecution, TssSignature,
+	TssSigningRequest,
 };
 use tokio::time::{sleep, Duration};
 use tracing::{event, span, Level, Span};
@@ -41,8 +42,8 @@ pub struct TimeWorker<S, T, Tx, Rx> {
 	tss_states: HashMap<ShardId, Tss>,
 	executor_states: HashMap<ShardId, T>,
 	messages: BTreeMap<BlockNumber, Vec<(ShardId, PeerId, TssMessage)>>,
-	requests: BTreeMap<BlockNumber, Vec<(ShardId, TssId, Vec<u8>)>>,
-	channels: HashMap<TssId, oneshot::Sender<(TssHash, TssSignature)>>,
+	requests: BTreeMap<BlockNumber, Vec<(ShardId, TaskExecution, Vec<u8>)>>,
+	channels: HashMap<TaskExecution, oneshot::Sender<(TssHash, TssSignature)>>,
 	#[allow(clippy::type_complexity)]
 	outgoing_requests: FuturesUnordered<
 		Pin<Box<dyn Future<Output = (ShardId, TaskExecution, Result<TssSignature>)> + Send + 'static>>,
@@ -193,27 +194,6 @@ where
 				self.poll_actions(&span, shard_id, block_number).await;
 			}
 		}
-		while let Some(n) = self.messages.keys().copied().next() {
-			if n > block_number {
-				break;
-			}
-			for (shard_id, peer_id, msg) in self.messages.remove(&n).unwrap() {
-				let Some(tss) = self.tss_states.get_mut(&shard_id) else {
-					event!(
-						target: TW_LOG,
-						parent: &span,
-						Level::INFO,
-						shard_id,
-						"dropping message {} from {:?}",
-						msg,
-						peer_id,
-					);
-					continue;
-				};
-				tss.on_message(peer_id, msg)?;
-				self.poll_actions(&span, shard_id, n).await;
-			}
-		}
 		for shard_id in shards {
 			if self.substrate.get_shard_status(block, shard_id).await? != ShardStatus::Online {
 				continue;
 			}
@@ -254,6 +234,27 @@ where
 				tss.on_start(session);
 			}
 		}
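+		// Drain buffered peer messages only after the shard bootstrap loop
+		// above has run, so a shard's TSS state exists before its messages
+		// are delivered.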
+		while let Some(n) = self.messages.keys().copied().next() {
+			if n > block_number {
+				break;
+			}
+			for (shard_id, peer_id, msg) in self.messages.remove(&n).unwrap() {
+				let Some(tss) = self.tss_states.get_mut(&shard_id) else {
+					event!(
+						target: TW_LOG,
+						parent: &span,
+						Level::INFO,
+						shard_id,
+						"dropping message {} from {:?}",
+						msg,
+						peer_id,
+					);
+					continue;
+				};
+				tss.on_message(peer_id, msg)?;
+				self.poll_actions(&span, shard_id, n).await;
+			}
+		}
 		Ok(())
 	}
diff --git a/chronicle/src/shards/tss.rs b/chronicle/src/shards/tss.rs
index e89d86884..113e4dd5a 100644
--- a/chronicle/src/shards/tss.rs
+++ b/chronicle/src/shards/tss.rs
@@ -3,24 +3,24 @@ use anyhow::Result;
 use sha3::{Digest, Sha3_256};
 use std::collections::BTreeSet;
 use std::path::{Path, PathBuf};
-pub use time_primitives::TssId;
+pub use time_primitives::TaskExecution;
 pub use tss::{
 	ProofOfKnowledge, Signature, SigningKey, VerifiableSecretSharingCommitment, VerifyingKey,
 };
 use valuable::Valuable;
 
-pub type TssMessage = tss::TssMessage<TssId>;
+pub type TssMessage = tss::TssMessage<TaskExecution>;
 
 #[derive(Clone)]
 pub enum TssAction {
 	Send(Vec<(PeerId, TssMessage)>),
 	Commit(VerifiableSecretSharingCommitment, ProofOfKnowledge),
 	PublicKey(VerifyingKey),
-	Signature(TssId, [u8; 32], Signature),
+	Signature(TaskExecution, [u8; 32], Signature),
 }
 
 pub enum Tss {
-	Enabled(tss::Tss<TssId, PeerId>),
+	Enabled(tss::Tss<TaskExecution, PeerId>),
 	Disabled(SigningKey, Option<TssAction>, bool),
 }
@@ -115,14 +115,14 @@ impl Tss {
 		}
 	}
 
-	pub fn on_start(&mut self, request_id: TssId) {
+	pub fn on_start(&mut self, request_id: TaskExecution) {
 		match self {
 			Self::Enabled(tss) => tss.on_start(request_id),
 			Self::Disabled(_, _, _) => {},
 		}
 	}
 
-	pub fn on_sign(&mut self, request_id: TssId, data: Vec<u8>) {
+	pub fn on_sign(&mut self, request_id: TaskExecution, data: Vec<u8>) {
 		match self {
 			Self::Enabled(tss) => tss.on_sign(request_id, data),
 			Self::Disabled(key, actions, _) => {
@@ -132,7 +132,7 @@ impl Tss {
 		}
 	}
 
-	pub fn on_complete(&mut self, request_id: TssId) {
+	pub fn on_complete(&mut self, request_id: TaskExecution) {
 		match self {
 			Self::Enabled(tss) => tss.on_complete(request_id),
 			Self::Disabled(_, _, _) => {},
diff --git a/chronicle/src/tasks/executor.rs b/chronicle/src/tasks/executor.rs
index b5385a9a7..4c2d9deac 100644
--- a/chronicle/src/tasks/executor.rs
+++ b/chronicle/src/tasks/executor.rs
@@ -6,10 +6,10 @@ use futures::Stream;
 use std::{collections::BTreeMap, pin::Pin};
 use time_primitives::{
 	BlockHash, BlockNumber, Function, GmpParams, Message, NetworkId, Runtime, ShardId,
-	TaskExecution, TaskPhase, TssId,
+	TaskExecution, TaskPhase,
 };
 use tokio::task::JoinHandle;
-use tracing::{event, span, Level, Span};
+use tracing::{event, span, Level};
 
 /// Set of properties we need to run our gadget
 #[derive(Clone)]
@@ -61,23 +61,9 @@ where
 		block_number: BlockNumber,
 		shard_id: ShardId,
 		target_block_height: u64,
-	) -> Result<(Vec<TssId>, Vec<TssId>)> {
-		let span = span!(
-			target: TW_LOG,
-			Level::DEBUG,
-			"process_tasks",
-			block = block_hash.to_string(),
-			block_number,
-		);
-		TaskExecutor::process_tasks(
-			self,
-			&span,
-			block_hash,
-			block_number,
-			shard_id,
-			target_block_height,
-		)
-		.await
+	) -> Result<(Vec<TaskExecution>, Vec<TaskExecution>)> {
+		TaskExecutor::process_tasks(self, block_hash, block_number, shard_id, target_block_height)
+			.await
 	}
 }
@@ -105,15 +91,13 @@ where
 	/// preprocesses the task before sending it for execution in task_spawner.rs
 	pub async fn process_tasks(
 		&mut self,
-		span: &Span,
 		block_hash: BlockHash,
 		block_number: BlockNumber,
 		shard_id: ShardId,
 		target_block_height: u64,
-	) -> Result<(Vec<TssId>, Vec<TssId>)> {
+	) -> Result<(Vec<TaskExecution>, Vec<TaskExecution>)> {
 		let span = span!(
 			target: TW_LOG,
-			parent: span,
 			Level::DEBUG,
 			"process_tasks",
 			block = block_hash.to_string(),
@@ -123,7 +107,7 @@ where
 		let mut start_sessions = vec![];
 		let tasks = self.substrate.get_shard_tasks(block_hash, shard_id).await?;
 		tracing::debug!("debug_latency Current Tasks Under processing: {:?}", tasks);
-		for executable_task in tasks.iter().clone() {
+		for executable_task in tasks.iter().copied() {
 			let task_id = executable_task.task_id;
 			event!(
 				target: TW_LOG,
@@ -132,7 +116,7 @@ where
 				task_id,
 				"task in execution",
 			);
-			if self.running_tasks.contains_key(executable_task) {
+			if self.running_tasks.contains_key(&executable_task) {
 				continue;
 			}
 			// gets task details
@@ -323,7 +307,7 @@ where
 			// Metrics: Increase number of running tasks
 			self.task_counter_metric.inc(&phase, &function_metric_clone);
 			let counter = self.task_counter_metric.clone();
-			start_sessions.push(TssId::new(task_id, phase));
+			start_sessions.push(executable_task);
 
 			let handle = tokio::task::spawn(async move {
 				match task.await {
@@ -353,12 +337,12 @@ where
 				// Metrics: Decrease number of running tasks
 				counter.dec(&phase, &function_metric_clone);
 			});
-			self.running_tasks.insert(executable_task.clone(), handle);
+			self.running_tasks.insert(executable_task, handle);
 		}
 		let mut completed_sessions = Vec::with_capacity(self.running_tasks.len());
 		// remove from running task if task is completed or we dont receive anymore from pallet
-		self.running_tasks.retain(|x, handle| {
-			if tasks.contains(x) {
+		self.running_tasks.retain(|executable_task, handle| {
+			if tasks.contains(executable_task) {
 				true
 			} else {
 				if !handle.is_finished() {
@@ -366,12 +350,12 @@ where
 						target: TW_LOG,
 						parent: &span,
 						Level::DEBUG,
-						x.task_id,
+						executable_task.task_id,
 						"task aborted",
 					);
 					handle.abort();
 				}
-				completed_sessions.push(TssId::new(x.task_id, x.phase));
+				completed_sessions.push(*executable_task);
 				false
 			}
 		});
@@ -435,14 +419,8 @@ mod tests {
 		while let Some((block_hash, block_number)) =
 			mock.finality_notification_stream().next().await
 		{
-			let span = span!(
-				Level::DEBUG,
-				"task_executor_smoke",
-				block = block_hash.to_string(),
-				block_number,
-			);
 			task_executor
-				.process_tasks(&span, block_hash, block_number, shard, target_block_height)
+				.process_tasks(block_hash, block_number, shard, target_block_height)
 				.await
 				.unwrap();
 			tracing::info!("Watching for result");
diff --git a/chronicle/src/tasks/mod.rs b/chronicle/src/tasks/mod.rs
index 431ec8419..62a8f1d47 100644
--- a/chronicle/src/tasks/mod.rs
+++ b/chronicle/src/tasks/mod.rs
@@ -2,7 +2,9 @@ use anyhow::Result;
 use async_trait::async_trait;
 use futures::{Future, Stream};
 use std::pin::Pin;
-use time_primitives::{BlockHash, BlockNumber, Function, NetworkId, ShardId, TaskId, TssId};
+use time_primitives::{
+	BlockHash, BlockNumber, Function, NetworkId, ShardId, TaskExecution, TaskId,
+};
 
 pub mod executor;
 pub mod spawner;
@@ -46,5 +48,5 @@ pub trait TaskExecutor {
 		block_number: BlockNumber,
 		shard_id: ShardId,
 		target_block_height: u64,
-	) -> Result<(Vec<TssId>, Vec<TssId>)>;
+	) -> Result<(Vec<TaskExecution>, Vec<TaskExecution>)>;
 }
diff --git a/chronicle/src/tasks/spawner.rs b/chronicle/src/tasks/spawner.rs
index f83ac889b..7f4f92b2c 100644
--- a/chronicle/src/tasks/spawner.rs
+++ b/chronicle/src/tasks/spawner.rs
@@ -17,8 +17,8 @@ use std::{
 	task::{Context, Poll},
 };
 use time_primitives::{
-	BlockNumber, Function, NetworkId, Payload, Runtime, ShardId, TaskId, TaskPhase, TaskResult,
-	TssHash, TssId, TssSignature, TssSigningRequest,
+	BlockNumber, Function, NetworkId, Payload, Runtime, ShardId, TaskExecution, TaskId, TaskPhase,
+	TaskResult, TssHash, TssSignature, TssSigningRequest,
 };
 use time_primitives::{IGateway, Msg};
 use tokio::sync::Mutex;
@@ -201,7 +201,7 @@ where
 		self.tss
 			.clone()
 			.send(TssSigningRequest {
-				request_id: TssId::new(task_id, task_phase),
+				request_id: TaskExecution::new(task_id, task_phase),
 				shard_id,
 				block_number,
 				data: payload.to_vec(),
diff --git a/config/docker/Dockerfile.chronicle-release b/config/docker/Dockerfile.chronicle-release
index e973b89af..83391e378 100644
--- a/config/docker/Dockerfile.chronicle-release
+++ b/config/docker/Dockerfile.chronicle-release
@@ -4,9 +4,13 @@ ARG PROFILE
 
 WORKDIR /build
 
+# The /build/bin folder will be used for any artifacts needed for later stages
+RUN mkdir /build/bin
+
 COPY . .
 
 RUN CARGO_HTTP_CHECK_REVOKE=false cargo build --profile $PROFILE --locked -p chronicle
+RUN mv /build/target/$PROFILE/chronicle /build/bin/chronicle
 
 FROM ubuntu:22.04
 
@@ -20,5 +24,5 @@ LABEL description="Multistage Dockerfile for building Analog Chronicle" \
 	one.analog.image.source="https://github.com/Analog-Labs/timechain" \
 	one.analog.image.commit="${VCS_REF}"
 
-COPY --from=builder /build/target/$PROFILE/chronicle chronicle
+COPY --from=builder /build/bin/chronicle chronicle
 ENTRYPOINT ["/chronicle"]
\ No newline at end of file
diff --git a/config/docker/Dockerfile.release b/config/docker/Dockerfile.timenode-release
similarity index 89%
rename from config/docker/Dockerfile.release
rename to config/docker/Dockerfile.timenode-release
index a39f89827..e988e0d3d 100644
--- a/config/docker/Dockerfile.release
+++ b/config/docker/Dockerfile.timenode-release
@@ -14,12 +14,12 @@ WORKDIR /build
 
 COPY . .
 
 RUN cargo build -p timechain-node --locked --profile $PROFILE --features $FEATURES
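+# Move the binary to a profile-independent path: the release stage below no
+# longer redeclares ARG PROFILE, so it copies from this fixed location.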
+RUN mv /build/target/$PROFILE/timechain-node /build/timechain-node
 
 ### Release stage
 # Copies the binary from the builder stage into a fresh scratch image
 FROM ubuntu:22.04
 
-ARG PROFILE
 ARG VCS_REF
 
 ## TODO: change repo name to timechain
@@ -34,5 +34,5 @@ LABEL description="Multistage Dockerfile for building Analog Timechain" \
 	one.analog.image.source="https://github.com/Analog-Labs/timechain" \
 	one.analog.image.commit="${VCS_REF}"
 
-COPY --from=builder /build/target/$PROFILE/timechain-node timechain-node
+COPY --from=builder /build/timechain-node timechain-node
 ENTRYPOINT ["/timechain-node"]
\ No newline at end of file
diff --git a/docs/src/lib.rs b/docs/src/lib.rs
index 7546546d8..7a3f8480d 100644
--- a/docs/src/lib.rs
+++ b/docs/src/lib.rs
@@ -6,7 +6,7 @@
 //! ## The Timechain Protocol
 //!
 //! The Analog Timechain is a substrate based solochain. It utilizes
-//! Babe and Grandpa to power its [`timechain_node`] and [`timechain_runtime`].
+//! Babe and Grandpa to power its `timechain_node` and [`timechain_runtime`].
 //!
 //! On top of that it runs the Timechain protocol to attest and relay data
 //! between various chains. This protocol is executed by shards of [`chronicle`] nodes.
diff --git a/flake.nix b/flake.nix
index 551328d67..d57ff716e 100644
--- a/flake.nix
+++ b/flake.nix
@@ -35,7 +35,7 @@
       # Download associated rust toolchain from mozilla
       toolchain = fpkgs.fromToolchainName {
         name = toml.toolchain.channel;
-        sha256 = "opUgs6ckUQCyDxcB9Wy51pqhd0MPGHUVbwRKKPGiwZU=";
+        sha256 = "Ngiz76YP4HTY75GGdH2P+APE/DEIx2R/Dn+BwwOyzZU=";
       };
 
       # Determine profile or use default
diff --git a/node/Cargo.toml b/node/Cargo.toml
index a0b67d6b2..b716b6bf2 100644
--- a/node/Cargo.toml
+++ b/node/Cargo.toml
@@ -101,10 +101,8 @@ polkadot-sdk = { workspace = true, features = [
 jsonrpsee = { version = "0.22.1", features = [ "server" ] }
 
 # node's local dependencies
-chronicle = { path = "../chronicle", optional = true }
 time-primitives = { path = "../primitives" }
 timechain-runtime = { path = "../runtime" }
-tc-subxt = { path = "../tc-subxt", optional = true }
 
 # additional command line interfaces
 #try-runtime-core = { git = "https://github.com/paritytech/try-runtime-cli", tag = "v0.7.0", optional = true }
@@ -131,5 +129,3 @@ try-runtime = [
 
 development = [ "timechain-runtime/development" ]
-
-# runs chronicle with node
-chronicle = ["dep:chronicle", "dep:tc-subxt"]
diff --git a/node/src/chronicle/mod.rs b/node/src/chronicle/mod.rs
deleted file mode 100644
index de8c03256..000000000
--- a/node/src/chronicle/mod.rs
+++ /dev/null
@@ -1,56 +0,0 @@
-use anyhow::Result;
-use sc_client_api::{BlockchainEvents, HeaderBackend};
-use sc_network::request_responses::IncomingRequest;
-use sc_network::{NetworkRequest, NetworkSigner};
-use sc_transaction_pool_api::OffchainTransactionPoolFactory;
-use sp_api::ProvideRuntimeApi;
-use sp_runtime::traits::Block;
-use std::sync::Arc;
-use time_primitives::{
-	BlockHash, MembersApi, NetworksApi, ShardsApi, SubmitTransactionApi, TasksApi,
-};
-
-mod network;
-mod runtime;
-
-pub use network::protocol_config;
-
-pub struct ChronicleParams<B: Block, C, R, N> {
-	pub client: Arc<C>,
-	pub runtime: Arc<R>,
-	pub tx_pool: OffchainTransactionPoolFactory<B>,
-	pub network: Option<(N, async_channel::Receiver<IncomingRequest>)>,
-	pub config: chronicle::ChronicleConfig,
-}
-
-pub async fn run_node_with_chronicle<B, C, R, N>(params: ChronicleParams<B, C, R, N>) -> Result<()>
-where
-	B: Block<Hash = BlockHash>,
-	C: BlockchainEvents<B> + HeaderBackend<B> + 'static,
-	R: ProvideRuntimeApi<B> + Send + Sync + 'static,
-	R::Api: MembersApi<B> + NetworksApi<B> + ShardsApi<B> + TasksApi<B> + SubmitTransactionApi<B>,
-	N: NetworkRequest + NetworkSigner + Send + Sync + 'static,
-{
-	let (network, net_request) = if let Some((network, incoming)) = params.network {
-		network::create_substrate_network(network, incoming).await?
-	} else {
-		chronicle::create_iroh_network(params.config.network_config()).await?
-	};
-
-	let tx_client = tc_subxt::SubxtClient::get_client(&params.config.timechain_url).await?;
-	let tx_submitter = runtime::SubstrateTxSubmitter::new(
-		params.tx_pool.clone(),
-		params.client.clone(),
-		params.runtime.clone(),
-		tx_client,
-	);
-	let subxt_client = tc_subxt::SubxtClient::with_keyfile(
-		&params.config.timechain_url,
-		&params.config.timechain_keyfile,
-		tx_submitter,
-	)
-	.await?;
-	let substrate = runtime::Substrate::new(params.client, params.runtime, subxt_client);
-
-	chronicle::run_chronicle(params.config, network, net_request, substrate).await
-}
diff --git a/node/src/chronicle/network.rs b/node/src/chronicle/network.rs
deleted file mode 100644
index 00fd8211a..000000000
--- a/node/src/chronicle/network.rs
+++ /dev/null
@@ -1,132 +0,0 @@
-use anyhow::Result;
-use chronicle::{Message, Network, PeerId, PROTOCOL_NAME};
-use futures::channel::oneshot;
-use futures::stream::BoxStream;
-use futures::{Future, Stream, StreamExt};
-use sc_network::config::{IncomingRequest, RequestResponseConfig};
-use sc_network::multiaddr::multihash::MultihashGeneric as Multihash;
-use sc_network::request_responses::OutgoingResponse;
-use sc_network::{IfDisconnected, NetworkRequest, NetworkSigner, PublicKey};
-use std::pin::Pin;
-use std::sync::Arc;
-use std::task::{Context, Poll};
-use std::time::Duration;
-
-pub fn protocol_config(tx: async_channel::Sender<IncomingRequest>) -> RequestResponseConfig {
-	RequestResponseConfig {
-		name: PROTOCOL_NAME.into(),
-		fallback_names: vec![],
-		max_request_size: 1024 * 1024,
-		max_response_size: 0,
-		request_timeout: Duration::from_secs(3),
-		inbound_queue: Some(tx),
-	}
-}
-
-pub struct SubstrateNetwork<N> {
-	network: N,
-	peer_id: PeerId,
-}
-
-impl<N> SubstrateNetwork<N>
-where
-	N: NetworkRequest + NetworkSigner,
-{
-	pub fn new(network: N) -> Result<Self> {
-		let public_key = network.sign_with_local_identity([])?.public_key;
-		let peer_id = public_key.clone().try_into_ed25519()?.to_bytes();
-		Ok(Self { network, peer_id })
-	}
-}
-
-impl<N: NetworkRequest + NetworkSigner> Network for SubstrateNetwork<N> {
-	fn peer_id(&self) -> PeerId {
-		self.peer_id
-	}
-
-	fn send(
-		&self,
-		peer_id: PeerId,
-		msg: Message,
-	) -> Pin<Box<dyn Future<Output = Result<Message>> + Send>> {
-		let bytes = bincode::serialize(&msg).unwrap();
-		let (tx, rx) = oneshot::channel();
-		let peer_id = sc_network::PeerId::from_public_key(
-			&sc_network::config::ed25519::PublicKey::try_from_bytes(&peer_id).unwrap().into(),
-		);
-		self.network.start_request(
-			peer_id,
-			PROTOCOL_NAME.into(),
-			bytes,
-			None,
-			tx,
-			IfDisconnected::TryConnect,
-		);
-		Box::pin(async move {
-			let response = rx.await??;
-			Ok(bincode::deserialize(&response.0)?)
-		})
-	}
-}
-
-fn parse_peer_id(peer: sc_network::PeerId) -> Option<PeerId> {
-	let mh = Multihash::from(peer);
-	if mh.code() != 0 {
-		return None;
-	}
-	let p = PublicKey::try_decode_protobuf(mh.digest()).ok()?;
-	let p = p.try_into_ed25519().ok()?;
-	Some(p.to_bytes())
-}
-
-pub struct SubstrateNetworkAdapter(async_channel::Receiver<IncomingRequest>);
-
-impl SubstrateNetworkAdapter {
-	pub fn new(rx: async_channel::Receiver<IncomingRequest>) -> Self {
-		Self(rx)
-	}
-}
-
-impl Stream for SubstrateNetworkAdapter {
-	type Item = (PeerId, Message);
-
-	fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
-		loop {
-			match Pin::new(&mut self.0).poll_next(cx) {
-				Poll::Ready(Some(IncomingRequest {
-					peer,
-					payload,
-					pending_response,
-				})) => {
-					// Don't try to do anything other than reply immediately as
-					// substrate will close the substream.
-					let _ = pending_response.send(OutgoingResponse {
-						result: Ok(vec![]),
-						reputation_changes: vec![],
-						sent_feedback: None,
-					});
-					let Some(peer) = parse_peer_id(peer) else {
-						tracing::info!("invalid peer id");
-						continue;
-					};
-					if let Ok(msg) = bincode::deserialize(&payload) {
-						return Poll::Ready(Some((peer, msg)));
-					} else {
-						tracing::info!("invalid message");
-					}
-				},
-				Poll::Ready(None) => return Poll::Ready(None),
-				Poll::Pending => return Poll::Pending,
-			}
-		}
-	}
-}
-
-pub async fn create_substrate_network<N: NetworkRequest + NetworkSigner + Send + Sync + 'static>(
-	network: N,
-	incoming: async_channel::Receiver<IncomingRequest>,
-) -> Result<(Arc<dyn Network>, BoxStream<'static, (PeerId, Message)>)> {
-	let network = Arc::new(SubstrateNetwork::new(network)?) as Arc<dyn Network>;
-	let incoming = SubstrateNetworkAdapter::new(incoming).boxed();
-	Ok((network, incoming))
-}
diff --git a/node/src/chronicle/runtime.rs b/node/src/chronicle/runtime.rs
deleted file mode 100644
index 69072efac..000000000
--- a/node/src/chronicle/runtime.rs
+++ /dev/null
@@ -1,283 +0,0 @@
-use anyhow::Result;
-use async_trait::async_trait;
-use futures::stream::{self, Stream, StreamExt};
-use sc_client_api::{BlockchainEvents, HeaderBackend};
-use sc_transaction_pool_api::OffchainTransactionPoolFactory;
-use sp_api::{ApiExt, ApiRef, ProvideRuntimeApi};
-use sp_core::H256;
-use sp_runtime::traits::{Block, Header};
-use std::marker::PhantomData;
-use std::pin::Pin;
-use std::sync::Arc;
-use tc_subxt::{
-	OnlineClient, PolkadotConfig, StreamOfResults, SubxtClient, TxProgress, TxSubmitter,
-};
-use time_primitives::{
-	AccountId, BlockHash, BlockNumber, Commitment, MemberStatus, MembersApi, NetworkId,
-	NetworksApi, PeerId, ProofOfKnowledge, PublicKey, Runtime, ShardId, ShardStatus, ShardsApi,
-	SubmitTransactionApi, TaskDescriptor, TaskError, TaskExecution, TaskId, TaskResult, TasksApi,
-	TssSignature,
-};
-
-pub struct Substrate<B: Block, C, R> {
-	_block: PhantomData<B>,
-	client: Arc<C>,
-	runtime: Arc<R>,
-	subxt_client: SubxtClient,
-}
-
-impl<B, C, R> Substrate<B, C, R>
-where
-	B: Block<Hash = BlockHash>,
-	C: HeaderBackend<B> + BlockchainEvents<B> + 'static,
-	R: ProvideRuntimeApi<B> + Send + Sync + 'static,
-	R::Api: NetworksApi<B> + MembersApi<B> + ShardsApi<B> + TasksApi<B> + SubmitTransactionApi<B>,
-{
-	fn best_block(&self) -> B::Hash {
-		self.client.info().best_hash
-	}
-
-	pub fn new(client: Arc<C>, runtime: Arc<R>, subxt_client: SubxtClient) -> Self {
-		Self {
-			_block: PhantomData,
-			client,
-			runtime,
-			subxt_client,
-		}
-	}
-
-	fn runtime_api(&self) -> ApiRef<'_, R::Api> {
-		self.runtime.runtime_api()
-	}
-}
-
-impl<B: Block, C, R> Clone for Substrate<B, C, R> {
-	fn clone(&self) -> Self {
-		Self {
-			_block: self._block,
-			client: self.client.clone(),
-			runtime: self.runtime.clone(),
-			subxt_client: self.subxt_client.clone(),
-		}
-	}
-}
-
-#[async_trait]
-impl<B, C, R> Runtime for Substrate<B, C, R>
-where
-	B: Block<Hash = BlockHash>,
-	C: HeaderBackend<B> + BlockchainEvents<B> + 'static,
-	R: ProvideRuntimeApi<B> + Send + Sync + 'static,
-	R::Api: NetworksApi<B> + MembersApi<B> + ShardsApi<B> + TasksApi<B> + SubmitTransactionApi<B>,
-{
-	fn public_key(&self) -> &PublicKey {
-		self.subxt_client.public_key()
-	}
-
-	fn account_id(&self) -> &AccountId {
-		self.subxt_client.account_id()
-	}
-
-	fn finality_notification_stream(
-		&self,
-	) -> Pin<Box<dyn Stream<Item = (BlockHash, BlockNumber)> + Send + 'static>> {
-		let stream = self.client.finality_notification_stream();
-		stream
-			.map(|notification| {
-				let block_hash = notification.header.hash();
-				let block_number = notification.header.number().to_string().parse().unwrap();
-				(block_hash, block_number)
-			})
-			.boxed()
-	}
-
-	async fn get_shards(&self, block: BlockHash, account: &AccountId) -> Result<Vec<ShardId>> {
-		Ok(self.runtime_api().get_shards(block, account)?)
-	}
-
-	async fn get_shard_members(
-		&self,
-		block: BlockHash,
-		shard_id: ShardId,
-	) -> Result<Vec<(AccountId, MemberStatus)>> {
-		Ok(self.runtime_api().get_shard_members(block, shard_id)?)
-	}
-
-	async fn get_shard_threshold(&self, block: BlockHash, shard_id: ShardId) -> Result<u16> {
-		Ok(self.runtime_api().get_shard_threshold(block, shard_id)?)
-	}
-
-	async fn get_shard_status(&self, block: BlockHash, shard_id: ShardId) -> Result<ShardStatus> {
-		Ok(self.runtime_api().get_shard_status(block, shard_id)?)
-	}
-
-	async fn get_shard_commitment(
-		&self,
-		block: BlockHash,
-		shard_id: ShardId,
-	) -> Result<Commitment> {
-		Ok(self.runtime_api().get_shard_commitment(block, shard_id)?)
-	}
-
-	async fn submit_commitment(
-		&self,
-		shard_id: ShardId,
-		commitment: Commitment,
-
-		proof_of_knowledge: ProofOfKnowledge,
-	) -> Result<()> {
-		self.subxt_client
-			.submit_commitment(shard_id, commitment, proof_of_knowledge)
-			.await
-	}
-
-	async fn submit_online(&self, shard_id: ShardId) -> Result<()> {
-		self.subxt_client.submit_online(shard_id).await
-	}
-
-	async fn get_shard_tasks(
-		&self,
-		block: BlockHash,
-		shard_id: ShardId,
-	) -> Result<Vec<TaskExecution>> {
-		Ok(self.runtime_api().get_shard_tasks(block, shard_id)?)
-	}
-
-	async fn get_task(&self, block: BlockHash, task_id: TaskId) -> Result<Option<TaskDescriptor>> {
-		Ok(self.runtime_api().get_task(block, task_id)?)
-	}
-
-	async fn get_task_signature(&self, task_id: TaskId) -> Result<Option<TssSignature>> {
-		Ok(self.runtime_api().get_task_signature(self.best_block(), task_id)?)
-	}
-
-	async fn get_gateway(&self, network: NetworkId) -> Result<Option<Vec<u8>>> {
-		Ok(self.runtime_api().get_gateway(self.best_block(), network)?)
-	}
-
-	async fn submit_task_hash(&self, task_id: TaskId, hash: Vec<u8>) -> Result<()> {
-		self.subxt_client.submit_task_hash(task_id, hash).await
-	}
-
-	async fn submit_task_result(&self, task_id: TaskId, result: TaskResult) -> Result<()> {
-		self.subxt_client.submit_task_result(task_id, result).await
-	}
-
-	async fn submit_task_error(&self, task_id: TaskId, error: TaskError) -> Result<()> {
-		self.subxt_client.submit_task_error(task_id, error).await
-	}
-
-	async fn submit_task_signature(&self, task_id: TaskId, signature: TssSignature) -> Result<()> {
-		self.subxt_client.submit_task_signature(task_id, signature).await
-	}
-
-	async fn get_member_peer_id(
-		&self,
-		block: BlockHash,
-		account: &AccountId,
-	) -> Result<Option<PeerId>> {
-		Ok(self.runtime_api().get_member_peer_id(block, account)?)
-	}
-
-	async fn get_heartbeat_timeout(&self) -> Result<BlockNumber> {
-		Ok(self.runtime_api().get_heartbeat_timeout(self.best_block())?)
-	}
-
-	async fn get_min_stake(&self) -> Result<u128> {
-		Ok(self.runtime_api().get_min_stake(self.best_block())?)
-	}
-
-	async fn submit_register_member(
-		&self,
-		network: NetworkId,
-		peer_id: PeerId,
-		stake_amount: u128,
-	) -> Result<()> {
-		self.subxt_client.submit_register_member(network, peer_id, stake_amount).await
-	}
-
-	async fn submit_heartbeat(&self) -> Result<()> {
-		self.subxt_client.submit_heartbeat().await
-	}
-
-	async fn get_network(&self, network_id: NetworkId) -> Result<Option<(String, String)>> {
-		Ok(self.runtime_api().get_network(self.best_block(), network_id)?)
-	}
-}
-
-pub struct SubstrateTxSubmitter<B: Block, C, R> {
-	_marker: PhantomData<B>,
-	client: Arc<C>,
-	pool: OffchainTransactionPoolFactory<B>,
-	runtime: Arc<R>,
-	tx_client: OnlineClient<PolkadotConfig>,
-}
-
-impl<B: Block, C, R> Clone for SubstrateTxSubmitter<B, C, R> {
-	fn clone(&self) -> Self {
-		Self {
-			_marker: self._marker,
-			client: self.client.clone(),
-			pool: self.pool.clone(),
-			runtime: self.runtime.clone(),
-			tx_client: self.tx_client.clone(),
-		}
-	}
-}
-
-impl<B, C, R> SubstrateTxSubmitter<B, C, R>
-where
-	B: Block<Hash = BlockHash>,
-	C: HeaderBackend<B> + BlockchainEvents<B> + 'static,
-	R: ProvideRuntimeApi<B> + Send + Sync + 'static,
-	R::Api: SubmitTransactionApi<B>,
-{
-	pub fn new(
-		pool: OffchainTransactionPoolFactory<B>,
-		client: Arc<C>,
-		runtime: Arc<R>,
-		tx_client: OnlineClient<PolkadotConfig>,
-	) -> Self {
-		Self {
-			_marker: PhantomData,
-			client,
-			pool,
-			runtime,
-			tx_client,
-		}
-	}
-
-	fn best_block(&self) -> B::Hash {
-		self.client.info().best_hash
-	}
-
-	fn runtime_api(&self) -> ApiRef<'_, R::Api> {
-		let mut runtime = self.runtime.runtime_api();
-		runtime.register_extension(self.pool.offchain_transaction_pool(self.best_block()));
-		runtime
-	}
-}
-
-#[async_trait]
-impl<B, C, R> TxSubmitter for SubstrateTxSubmitter<B, C, R>
-where
-	B: Block<Hash = BlockHash>,
-	C: HeaderBackend<B> + BlockchainEvents<B> + 'static,
-	R: ProvideRuntimeApi<B> + Send + Sync + 'static,
-	R::Api: SubmitTransactionApi<B>,
-{
-	async fn submit(&self, tx: Vec<u8>) -> Result<TxProgress> {
-		self.runtime_api()
-			.submit_transaction(self.best_block(), tx)
-			.map_err(|_| anyhow::anyhow!("Error submitting transaction to runtime"))?
-			.map_err(|_| anyhow::anyhow!("Error submitting transaction onchain"))?;
-		let dummy_hash = H256::repeat_byte(0x01);
-		let dummy_stream = stream::iter(vec![]);
-		let empty_progress = TxProgress::new(
-			StreamOfResults::new(Box::pin(dummy_stream)),
-			self.tx_client.clone(),
-			dummy_hash,
-		);
-		Ok(empty_progress)
-	}
-}
diff --git a/node/src/cli.rs b/node/src/cli.rs
index f80b61eed..5d1ff9a2b 100644
--- a/node/src/cli.rs
+++ b/node/src/cli.rs
@@ -24,43 +24,6 @@ pub struct Cli {
 	#[allow(missing_docs)]
 	#[clap(flatten)]
 	pub storage_monitor: sc_storage_monitor::StorageMonitorParams,
-
-	#[cfg(feature = "chronicle")]
-	#[allow(missing_docs)]
-	#[clap(flatten)]
-	pub chronicle: Option<ChronicleArgs>,
-}
-
-#[cfg(feature = "chronicle")]
-#[derive(Debug, clap::Parser)]
-/// workaround for
-#[group(requires_all = ["network_id", "target_url", "target_keyfile", "timechain_keyfile"], multiple = true)]
-pub struct ChronicleArgs {
-	/// The network to be used from Analog Connector.
-	#[arg(required = false)]
-	#[clap(long)]
-	pub network_id: time_primitives::NetworkId,
-	/// The secret to use for p2p networking.
-	#[clap(long)]
-	pub network_keyfile: Option<std::path::PathBuf>,
-	/// The port to bind to for p2p networking.
-	#[clap(long)]
-	pub bind_port: Option<u16>,
-	/// Enables iroh networking.
-	#[clap(long)]
-	pub enable_iroh: bool,
-	/// The address of Analog Connector.
-	#[arg(required = false)]
-	#[clap(long)]
-	pub target_url: String,
-	/// key file for connector wallet
-	#[arg(required = false)]
-	#[clap(long)]
-	pub target_keyfile: std::path::PathBuf,
-	/// keyfile having an account with funds for timechain.
-	#[arg(required = false)]
-	#[clap(long)]
-	pub timechain_keyfile: std::path::PathBuf,
 }
 
 /// Possible subcommands of the main binary.
diff --git a/node/src/main.rs b/node/src/main.rs
index 685a14d74..8e220e820 100644
--- a/node/src/main.rs
+++ b/node/src/main.rs
@@ -1,8 +1,6 @@
 //! Substrate Node CLI
 
 mod chain_spec;
-#[cfg(feature = "chronicle")]
-mod chronicle;
 #[macro_use]
 mod service;
 mod benchmarking;
diff --git a/node/src/service.rs b/node/src/service.rs
index 81a1b10b1..2360073aa 100644
--- a/node/src/service.rs
+++ b/node/src/service.rs
@@ -215,6 +215,7 @@ pub fn new_partial(
 }
 
 /// Result of [`new_full_base`].
+#[allow(dead_code)]
 pub struct NewFullBase {
 	/// The task manager of the node.
 	pub task_manager: TaskManager,
@@ -234,7 +235,6 @@ pub struct NewFullBase {
 pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 	config: Configuration,
 	disable_hardware_benchmarks: bool,
-	#[cfg(feature = "chronicle")] chronicle_args: Option<cli::ChronicleArgs>,
 	with_startup_data: impl FnOnce(
 		&sc_consensus_babe::BabeBlockImport<Block, FullClient, FullSelectChain>,
 		&sc_consensus_babe::BabeLink<Block>,
@@ -290,12 +290,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 	);
 	net_config.add_notification_protocol(grandpa_protocol_config);
 
-	// registering time p2p protocol
-	#[cfg(feature = "chronicle")]
-	let (protocol_tx, protocol_rx) = async_channel::bounded(10);
-	#[cfg(feature = "chronicle")]
-	net_config.add_request_response_protocol(crate::chronicle::protocol_config(protocol_tx));
-
 	let warp_sync = Arc::new(sc_consensus_grandpa::warp_proof::NetworkProvider::new(
 		backend.clone(),
 		import_setup.1.shared_authority_set().clone(),
@@ -479,34 +473,6 @@ pub fn new_full_base<N: NetworkBackend<Block, <Block as BlockT>::Hash>>(
 		);
 	}
 
-	#[cfg(feature = "chronicle")]
-	{
-		if let Some(args) = chronicle_args {
-			let config = chronicle::ChronicleConfig {
-				network_id: args.network_id,
-				network_port: args.bind_port,
-				network_keyfile: args.network_keyfile,
-				timechain_url: "ws://127.0.0.1:9944".into(),
-				timechain_keyfile: args.timechain_keyfile,
-				target_url: args.target_url,
-				target_keyfile: args.target_keyfile,
-			};
-			let network = if args.enable_iroh { None } else { Some((network, protocol_rx)) };
-			let params = crate::chronicle::ChronicleParams {
-				client: client.clone(),
-				runtime: client.clone(),
-				tx_pool: OffchainTransactionPoolFactory::new(transaction_pool.clone()),
-				network,
-				config,
-			};
-			task_manager
-				.spawn_essential_handle()
-				.spawn_blocking("chronicle", None, async move {
-					crate::chronicle::run_node_with_chronicle(params).await.unwrap()
-				});
-		}
-	}
-
 	if enable_offchain_worker {
 		task_manager.spawn_handle().spawn(
 			"offchain-workers-runner",
@@ -548,8 +514,6 @@ pub fn new_full(config: Configuration, cli: cli::Cli) -> Result<TaskManager, ServiceError> {
 			new_full_base::<sc_network::NetworkWorker<Block, <Block as BlockT>::Hash>>(
 				config,
 				cli.no_hardware_benchmarks,
-				#[cfg(feature = "chronicle")]
-				cli.chronicle,
 				|_, _| (),
 			)
 			.map(|NewFullBase { task_manager, .. }| task_manager)?
@@ -558,8 +522,6 @@ pub fn new_full(config: Configuration, cli: cli::Cli) -> Result<TaskManager, ServiceError> {
 			new_full_base::<sc_network::Litep2pNetworkBackend>(
 				config,
 				cli.no_hardware_benchmarks,
-				#[cfg(feature = "chronicle")]
-				cli.chronicle,
 				|_, _| (),
 			)
 			.map(|NewFullBase { task_manager, .. }| task_manager)?
diff --git a/pallets/elections/Cargo.toml b/pallets/elections/Cargo.toml
index d41ef8b60..73436a4c3 100644
--- a/pallets/elections/Cargo.toml
+++ b/pallets/elections/Cargo.toml
@@ -57,7 +57,8 @@ std = [
 	"sp-runtime/std",
 	"sp-std/std",
 	"frame-benchmarking?/std",
-	"time-primitives/std"
+	"time-primitives/std",
+	"pallet-balances/std"
 ]
 runtime-benchmarks = [
 	#"polkadot-sdk/runtime-benchmarks",
diff --git a/pallets/networks/Cargo.toml b/pallets/networks/Cargo.toml
index b8f704a4f..dcd04c20d 100644
--- a/pallets/networks/Cargo.toml
+++ b/pallets/networks/Cargo.toml
@@ -38,7 +38,7 @@ sp-io = { git = "https://github.com/analog-labs/polkadot-sdk", tag = "v1.13.0-pa
 [features]
 default = [ "std" ]
 std = [
-	"scale-codec/std",
+	"scale-codec/std",
 	"scale-info/std",
 	#"polkadot-sdk/std",
 	"frame-support/std",
@@ -46,11 +46,12 @@ std = [
 	"sp-runtime/std",
 	"frame-benchmarking?/std",
 	"time-primitives/std",
+	"pallet-balances/std",
 ]
 runtime-benchmarks = [
 	#"polkadot-sdk/runtime-benchmarks",
 	"frame-benchmarking/runtime-benchmarks",
-	"pallet-balances/runtime-benchmarks",
+	"pallet-balances/runtime-benchmarks",
 ]
 try-runtime = [
 	#"polkadot-sdk/try-runtime"
diff --git a/pallets/networks/src/benchmarking.rs b/pallets/networks/src/benchmarking.rs
index d27978607..7fd8a7603 100644
--- a/pallets/networks/src/benchmarking.rs
+++ b/pallets/networks/src/benchmarking.rs
@@ -19,7 +19,7 @@ benchmarks! {
 	for _ in 0..b {
 		network.push('b');
 	}
-	}: _(RawOrigin::Root, name.into(), network.into())
+	}: _(RawOrigin::Root, name, network)
 	verify {}
 
 	impl_benchmark_test_suite!(Pallet, crate::mock::new_test_ext(), crate::mock::Test);
diff --git a/pallets/shards/Cargo.toml b/pallets/shards/Cargo.toml
index d7c905b60..43cc4e060 100644
--- a/pallets/shards/Cargo.toml
+++ b/pallets/shards/Cargo.toml
@@ -55,6 +55,7 @@ std = [
 	"frame-benchmarking?/std",
 	"schnorr-evm/std",
 	"time-primitives/std",
+	"pallet-balances/std"
 ]
 runtime-benchmarks = [
 	#"polkadot-sdk/runtime-benchmarks",
diff --git a/pallets/shards/src/benchmarking.rs b/pallets/shards/src/benchmarking.rs
index 8a753a404..99e3dee49 100644
--- a/pallets/shards/src/benchmarking.rs
+++ b/pallets/shards/src/benchmarking.rs
@@ -27,7 +27,7 @@ benchmarks! {
 		Pallet::<T>::commit(
 			RawOrigin::Signed(member.clone()).into(),
 			0,
-			vec![public_key.clone()],
+			vec![public_key],
 			[0; 65],
 		)?;
 	}
@@ -45,7 +45,7 @@ benchmarks! {
 		Pallet::<T>::commit(
 			RawOrigin::Signed(member.clone()).into(),
 			0,
-			vec![public_key.clone()],
+			vec![public_key],
 			[0; 65],
 		)?;
 	}
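The `.clone()` removals in these benchmarks (and the ones that follow) are clippy `clone_on_copy` fixes: TSS public keys and member ids are fixed-size byte arrays, which are `Copy`, so cloning them is a no-op. A std-only illustration:

```rust
// Fixed-size byte arrays implement Copy, so passing them by value does not
// move them out of the caller.
fn takes_key(key: [u8; 33]) -> u8 {
	key[0]
}

fn main() {
	let public_key = [7u8; 33];

	// Before: `takes_key(public_key.clone())` — compiles, but the clone is
	// just a copy and clippy flags it. After: pass the value directly.
	let first = takes_key(public_key);

	// `public_key` is still usable here because `[u8; 33]: Copy`.
	assert_eq!(first, public_key[0]);
}
```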
diff --git a/pallets/tasks/src/benchmarking.rs b/pallets/tasks/src/benchmarking.rs
index a2348dcc5..5a567cacb 100644
--- a/pallets/tasks/src/benchmarking.rs
+++ b/pallets/tasks/src/benchmarking.rs
@@ -136,7 +136,7 @@ benchmarks! {
 	let mut i = 0u8;
 	while u16::from(i) < <T as Config>::Elections::default_shard_size() {
 		let member = [i; 32];
-		let member_account: AccountId = member.clone().into();
+		let member_account: AccountId = member.into();
 		pallet_balances::Pallet::<T>::resolve_creating(
 			&member_account,
 			pallet_balances::Pallet::<T>::issue(<T as Config>::MinStake::get() * 100),
@@ -144,7 +144,7 @@ benchmarks! {
 		pallet_members::Pallet::<T>::register_member(
 			RawOrigin::Signed(member_account).into(),
 			ETHEREUM,
-			public_key(member.clone()),
+			public_key(member),
 			member,
 			<T as Config>::MinStake::get(),
 		)?;
@@ -172,7 +172,7 @@ benchmarks! {
 		pallet_members::Pallet::<T>::register_member(
 			RawOrigin::Signed(assigned_signer.clone()).into(),
 			ETHEREUM,
-			public_key(raw_signer.clone()),
+			public_key(raw_signer),
 			raw_signer,
 			<T as Config>::MinStake::get(),
 		)?;
@@ -191,7 +191,7 @@ benchmarks! {
 	let mut i = 0u8;
 	while u16::from(i) < <T as Config>::Elections::default_shard_size() {
 		let member = [i; 32];
-		let member_account: AccountId = member.clone().into();
+		let member_account: AccountId = member.into();
 		pallet_balances::Pallet::<T>::resolve_creating(
 			&member_account,
 			pallet_balances::Pallet::<T>::issue(<T as Config>::MinStake::get() * 100),
@@ -199,7 +199,7 @@ benchmarks! {
 		pallet_members::Pallet::<T>::register_member(
 			RawOrigin::Signed(member_account).into(),
 			ETHEREUM,
-			public_key(member.clone()),
+			public_key(member),
 			member,
 			<T as Config>::MinStake::get(),
 		)?;
@@ -213,7 +213,7 @@ benchmarks! {
 	ShardState::<T>::insert(0, ShardStatus::Online);
 	Pallet::<T>::shard_online(0, ETHEREUM);
 	let raw_caller = [0u8; 32];
-	let caller: AccountId = raw_caller.clone().into();
+	let caller: AccountId = raw_caller.into();
 	Pallet::<T>::create_task(RawOrigin::Signed(caller.clone()).into(), descriptor)?;
 	Pallet::<T>::register_gateway(RawOrigin::Root.into(), 0, [0u8; 20], 0)?;
 	let (pub_key, signature) = mock_submit_sig();
@@ -276,7 +276,7 @@ benchmarks! {
 		Pallet::<T>::shard_online(j, ETHEREUM);
 		Pallet::<T>::register_gateway(RawOrigin::Root.into(), j, [0u8; 20], 20)?;
 	}
-	}: _(RawOrigin::Root, b.into()) verify {}
+	}: _(RawOrigin::Root, b) verify {}
 
 	set_batch_size {
 	}: _(RawOrigin::Root, ETHEREUM, 100, 25) verify {}
diff --git a/pallets/tasks/src/lib.rs b/pallets/tasks/src/lib.rs
index a56dca80b..f5337591d 100644
--- a/pallets/tasks/src/lib.rs
+++ b/pallets/tasks/src/lib.rs
@@ -525,14 +525,13 @@ pub mod pallet {
 				Self::start_phase(shard_id, task_id, TaskPhase::Read);
 			},
 			Err(err) => {
-				Self::finish_task(
-					task_id,
-					TaskResult {
-						shard_id,
-						payload: Payload::Error(err),
-						signature: [0; 64],
-					},
-				);
+				let result = TaskResult {
+					shard_id,
+					payload: Payload::Error(err),
+					signature: [0; 64],
+				};
+				Self::finish_task(task_id, result.clone());
+				Self::deposit_event(Event::TaskResult(task_id, result));
 			},
 		}
 		Ok(())
diff --git a/pallets/timegraph/Cargo.toml b/pallets/timegraph/Cargo.toml
index 0ae51c6d3..c06814f14 100644
--- a/pallets/timegraph/Cargo.toml
+++ b/pallets/timegraph/Cargo.toml
@@ -40,6 +40,7 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"frame-benchmarking?/std",
+	"pallet-balances/std",
 ]
 runtime-benchmarks = [
 	#"polkadot-sdk/runtime-benchmarks",
diff --git a/pallets/timegraph/src/benchmarking.rs b/pallets/timegraph/src/benchmarking.rs
index 97e2129ee..1fc3ca6a7 100644
--- a/pallets/timegraph/src/benchmarking.rs
+++ b/pallets/timegraph/src/benchmarking.rs
@@ -1,4 +1,5 @@
 #![cfg(feature = "runtime-benchmarks")]
+#![allow(clippy::duplicated_attributes)]
 
 use super::*;
 #[allow(unused)]
diff --git a/pallets/timegraph/src/lib.rs b/pallets/timegraph/src/lib.rs
index e16b7a98a..1123e5996 100644
--- a/pallets/timegraph/src/lib.rs
+++ b/pallets/timegraph/src/lib.rs
@@ -8,6 +8,7 @@ pub use pallet::*;
 
 #[cfg(feature = "runtime-benchmarks")]
 mod benchmarking;
+
 #[cfg(test)]
 mod mock;
 #[cfg(test)]
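The pallets/tasks hunk above restructures the error path so the `TaskResult` is built once and then used twice: it still finishes the task, and it is now also emitted as a `TaskResult` event. The shape of that change in plain Rust, with hypothetical stand-in types rather than the pallet's real API:

```rust
#[derive(Clone, Debug)]
struct TaskResult {
	shard_id: u64,
	payload: String,
}

#[derive(Debug)]
enum Event {
	TaskResult(u64, TaskResult),
}

fn main() {
	let mut finished: Vec<(u64, TaskResult)> = Vec::new();
	let mut events: Vec<Event> = Vec::new();

	// Build the result once ...
	let result = TaskResult { shard_id: 0, payload: "error".into() };
	// ... one copy records the final task state (finish_task in the pallet) ...
	finished.push((42, result.clone()));
	// ... and the other feeds the event, which the diff adds on the error path.
	events.push(Event::TaskResult(42, result));

	assert_eq!(finished.len(), 1);
	println!("{events:?}");
}
```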
diff --git a/primitives/src/shard.rs b/primitives/src/shard.rs
index 561be7943..256bbb443 100644
--- a/primitives/src/shard.rs
+++ b/primitives/src/shard.rs
@@ -5,12 +5,11 @@ use futures::channel::oneshot;
 #[cfg(feature = "std")]
 use serde::{Deserialize, Serialize};
 
-use crate::{TaskId, TaskPhase};
+use crate::TaskExecution;
 use scale_codec::{Decode, Encode};
 use scale_info::prelude::string::String;
 use scale_info::TypeInfo;
 use sp_std::vec::Vec;
-use valuable::Valuable;
 
 pub type TssPublicKey = [u8; 33];
 pub type TssSignature = [u8; 64];
@@ -20,26 +19,6 @@ pub type ShardId = u64;
 pub type ProofOfKnowledge = [u8; 65];
 pub type Commitment = Vec<TssPublicKey>;
 
-#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
-#[derive(Debug, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, Valuable)]
-pub struct TssId {
-	task_id: TaskId,
-	task_phase: TaskPhase,
-}
-
-impl TssId {
-	pub fn new(task_id: TaskId, task_phase: TaskPhase) -> Self {
-		Self { task_id, task_phase }
-	}
-}
-
-#[cfg(feature = "std")]
-impl std::fmt::Display for TssId {
-	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-		write!(f, "{}-{}", self.task_id, self.task_phase)
-	}
-}
-
 #[derive(Debug, Clone, Eq, PartialEq, Encode, Decode, TypeInfo)]
 pub enum MemberStatus {
 	Added,
@@ -87,7 +66,7 @@ impl Default for ShardStatus {
 
 #[cfg(feature = "std")]
 pub struct TssSigningRequest {
-	pub request_id: TssId,
+	pub request_id: TaskExecution,
 	pub shard_id: ShardId,
 	pub block_number: BlockNumber,
 	pub data: Vec<u8>,
diff --git a/primitives/src/task.rs b/primitives/src/task.rs
index 8af252628..91401795b 100644
--- a/primitives/src/task.rs
+++ b/primitives/src/task.rs
@@ -176,8 +176,8 @@ impl Default for TaskPhase {
 	}
 }
 
-#[cfg_attr(feature = "std", derive(Serialize))]
-#[derive(Debug, Clone, Encode, Decode, TypeInfo, PartialEq, Eq, PartialOrd, Ord)]
+#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
+#[derive(Clone, Copy, Debug, Encode, Decode, TypeInfo, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub struct TaskExecution {
 	pub task_id: TaskId,
 	pub phase: TaskPhase,
@@ -192,7 +192,7 @@ impl TaskExecution {
 #[cfg(feature = "std")]
 impl std::fmt::Display for TaskExecution {
 	fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-		write!(f, "{}", self.task_id)
+		write!(f, "{}-{}", self.task_id, self.phase)
 	}
 }
 
diff --git a/runtime/src/lib.rs b/runtime/src/lib.rs
index 3588bab75..a66c73d14 100644
--- a/runtime/src/lib.rs
+++ b/runtime/src/lib.rs
@@ -395,8 +395,8 @@ impl pallet_offences::Config for Runtime {
 
 parameter_types! {
 	pub NposSolutionPriority: TransactionPriority =
-		Perbill::from_percent(90) * TransactionPriority::max_value();
-	pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value();
+		Perbill::from_percent(90) * TransactionPriority::MAX;
+	pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::MAX;
 }
 
 impl pallet_im_online::Config for Runtime {
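The runtime hunks here and just below swap `max_value()` calls for the `MAX` associated constant. `max_value()` on the integer types is deprecated in favor of the associated constant (available since Rust 1.43), and `MAX` also works directly in `const` items. A small standalone demonstration:

```rust
// The associated constant can seed a `const` item directly.
const MAX_PRIORITY: u64 = u64::MAX;

fn main() {
	// Same value as the deprecated method call form.
	#[allow(deprecated)]
	let old = u64::max_value();
	assert_eq!(old, MAX_PRIORITY);

	// Scaling a priority by a percentage, loosely mirroring
	// `Perbill::from_percent(90) * TransactionPriority::MAX`:
	let npos_priority = MAX_PRIORITY / 10 * 9;
	assert!(npos_priority < MAX_PRIORITY);
}
```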
@@ -882,7 +882,7 @@ parameter_types! {
 	pub const ProposalBondMinimum: Balance = ANLOG;
 	pub const SpendPeriod: BlockNumber = prod_or_dev!(DAYS, HOURS);
 	pub const Burn: Permill = Permill::from_percent(50);
-	pub const MaxBalance: Balance = Balance::max_value();
+	pub const MaxBalance: Balance = Balance::MAX;
 	pub const PayoutPeriod: BlockNumber = prod_or_dev!(14 * DAYS, 6 * HOURS);
 	pub TreasuryAccount: AccountId = Treasury::account_id();
 }
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index 177b28046..e595f65c5 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -1,4 +1,4 @@
 [toolchain]
-channel = "1.78.0"
+channel = "1.79.0"
 components = [ "rust-src", "rust-analyzer" ]
 targets = [ "wasm32-unknown-unknown" ]
diff --git a/tc-subxt/Cargo.toml b/tc-subxt/Cargo.toml
index 6c069a913..bf80e24bb 100644
--- a/tc-subxt/Cargo.toml
+++ b/tc-subxt/Cargo.toml
@@ -26,7 +26,3 @@ subxt = { version = "0.37.0", features = ["unstable-reconnecting-rpc-client"] }
 subxt-signer = { version = "0.37.0", features = ["subxt"]}
 
 time-primitives = { path = "../primitives" }
-
-[features]
-testnet = []
-development = []
diff --git a/tc-subxt/src/lib.rs b/tc-subxt/src/lib.rs
index ca0e15972..320dbd6ae 100644
--- a/tc-subxt/src/lib.rs
+++ b/tc-subxt/src/lib.rs
@@ -1,3 +1,4 @@
+#![allow(clippy::missing_transmute_annotations)]
 use anyhow::{Context, Result};
 use async_trait::async_trait;
 use futures::channel::mpsc;
diff --git a/tester/Cargo.toml b/tester/Cargo.toml
index f91ed0ca1..06671bbb1 100644
--- a/tester/Cargo.toml
+++ b/tester/Cargo.toml
@@ -36,5 +36,3 @@ time-primitives = { path = "../primitives" }
 
 [features]
 default = []
-testnet = [ "tc-subxt/testnet" ]
-development = [ "tc-subxt/development" ]
diff --git a/tester/src/lib.rs b/tester/src/lib.rs
index 353aa0fd1..a0e9ede13 100644
--- a/tester/src/lib.rs
+++ b/tester/src/lib.rs
@@ -7,7 +7,7 @@ use rosetta_config_ethereum::{
 };
 use schnorr_evm::SigningKey;
 use sp_core::crypto::Ss58Codec;
-use std::collections::HashMap;
+use std::collections::{BTreeSet, HashMap};
 use std::future::Future;
 use std::path::{Path, PathBuf};
 use std::process::Command;
@@ -683,6 +683,7 @@ pub struct GmpBenchState {
 	gmp_execution_duration: Duration,
 	pub tasks: HashMap<TaskId, TaskPhaseInfo>,
 	recv_tasks: HashMap<TaskId, RecvTaskPhase>,
+	errored_tasks: HashMap<TaskId, String>,
 	total_src_gas: Vec,
 }
@@ -695,6 +696,7 @@ impl GmpBenchState {
 			total_deposit: Default::default(),
 			tasks: HashMap::with_capacity(total_calls as usize),
 			recv_tasks: Default::default(),
+			errored_tasks: Default::default(),
 			total_src_gas: Vec::with_capacity(total_calls as usize),
 		}
 	}
@@ -742,7 +744,7 @@ impl GmpBenchState {
 		}
 	}
 
-	pub fn task_ids(&self) -> Vec<TaskId> {
+	pub fn task_ids(&self) -> BTreeSet<TaskId> {
 		self.tasks.keys().cloned().collect()
 	}
@@ -750,6 +752,23 @@ impl GmpBenchState {
 		self.recv_tasks.keys().cloned().collect()
 	}
 
+	pub fn add_errored_tasks(&mut self, task_id: TaskId, reason: String) {
+		self.errored_tasks.insert(task_id, reason);
+	}
+
+	fn get_success_tasks(&self) -> HashMap<TaskId, TaskPhaseInfo> {
+		self.tasks
+			.iter()
+			.filter_map(|(task_id, task_info)| {
+				if !self.errored_tasks.contains_key(task_id) {
+					Some((*task_id, task_info.clone()))
+				} else {
+					None
+				}
+			})
+			.collect::<_>()
+	}
+
 	pub fn finish_task(&mut self, task_id: TaskId) {
 		let phase = self.tasks.get_mut(&task_id);
 		if let Some(phase) = phase {
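`task_ids()` switching from `Vec<TaskId>` to `BTreeSet<TaskId>` buys two things for the benchmark loop: deterministic, sorted output in the progress logs (HashMap iteration order is unspecified) and a cheap `contains` membership test. A std-only sketch:

```rust
use std::collections::{BTreeSet, HashMap};

fn main() {
	let mut tasks: HashMap<u64, &str> = HashMap::new();
	tasks.insert(11, "write");
	tasks.insert(3, "read");
	tasks.insert(7, "sign");

	// Collecting the keys into a BTreeSet sorts and deduplicates them.
	let ids: BTreeSet<u64> = tasks.keys().copied().collect();

	// O(log n) membership check, as used when matching finished task ids.
	assert!(ids.contains(&7));

	// Stable order regardless of the HashMap's hashing:
	assert_eq!(ids.into_iter().collect::<Vec<_>>(), vec![3, 7, 11]);
}
```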
@@ -761,8 +780,9 @@ impl GmpBenchState {
 		}
 	}
 
-	pub async fn sync_phase(&mut self, src_tester: &Tester) {
-		let unassigned_tasks = src_tester.get_network_unassigned_tasks(3).await;
+	pub async fn sync_phase(&mut self, dest_tester: &Tester) {
+		let unassigned_tasks =
+			dest_tester.get_network_unassigned_tasks(dest_tester.network_id()).await;
 		// update recv_tasks status
 		for (task_id, phase) in self.recv_tasks.iter_mut() {
 			// if task is unassigned then skip it
@@ -782,7 +802,7 @@ impl GmpBenchState {
 				continue;
 			}
 
-			let task_state = src_tester.get_task_phase(*task_id).await;
+			let task_state = dest_tester.get_task_phase(*task_id).await;
 
 			if let Some(task_state) = task_state {
 				phase.shift_phase(task_state)
@@ -854,21 +874,15 @@ impl GmpBenchState {
 	}
 
 	fn print_recv_message_latencies(&self) {
-		let creation_latencies: Vec<Duration> = self
-			.recv_tasks
-			.values()
+		let recv_tasks = self.recv_tasks.values();
+		let creation_latencies: Vec<Duration> = recv_tasks
+			.clone()
 			.map(|info| info.start_time.unwrap().duration_since(self.gmp_start_time))
 			.collect();
-		let start_latencies: Vec<Duration> = self
-			.recv_tasks
-			.values()
-			.map(|info| info.get_start_duration().unwrap())
-			.collect();
-		let finish_latencies: Vec<Duration> = self
-			.recv_tasks
-			.values()
-			.map(|info| info.get_execution_time().unwrap())
-			.collect();
+		let start_latencies: Vec<Duration> =
+			recv_tasks.clone().map(|info| info.get_start_duration().unwrap()).collect();
+		let finish_latencies: Vec<Duration> =
+			recv_tasks.clone().map(|info| info.get_execution_time().unwrap()).collect();
 
 		let average_creation_latency =
 			sum_duration(creation_latencies.clone()) / creation_latencies.len() as u32;
@@ -894,6 +908,13 @@ impl GmpBenchState {
 	///
 	/// print average delays in send message tasks during benchmark
 	fn print_send_message_analysis(&self) {
+		if !self.errored_tasks.is_empty() {
+			println!("Following tasks failed:");
+			println!("{:#?}", self.errored_tasks);
+		}
+
+		let tasks = self.get_success_tasks();
+
 		let mut builder = Builder::new();
 		builder.push_record([
 			"task_id",
@@ -913,8 +934,7 @@ impl GmpBenchState {
 		let total_spent_time = self.gmp_execution_duration;
 
 		// calculate task duration for each task
-		let all_task_phase_duration: Vec<_> = self
-			.tasks
+		let all_task_phase_duration: Vec<_> = tasks
 			.iter()
 			.map(|(k, v)| {
 				(
@@ -961,26 +981,25 @@ impl GmpBenchState {
 	///
 	/// print average delays find in task execution
 	fn print_send_message_task_latencies(&self) {
-		let unassigned_latencies: Vec<Duration> =
-			self.tasks.values().map(|info| info.unassigned_time().unwrap()).collect();
+		let tasks = self.get_success_tasks();
 
-		let sign_latencies: Vec<Duration> =
-			self.tasks.values().map(|info| info.sign_to_write_duration().unwrap()).collect();
-
-		let write_latencies: Vec<Duration> =
-			self.tasks.values().map(|info| info.write_to_read_duration().unwrap()).collect();
-
-		let read_latencies: Vec<Duration> = self
-			.tasks
-			.values()
-			.map(|info| info.read_to_finish_duration().unwrap())
-			.collect();
+		if tasks.is_empty() {
+			return;
+		}
 
-		let total_latencies: Vec<Duration> = self
-			.tasks
-			.values()
-			.map(|info| info.total_execution_duration().unwrap())
-			.collect();
+		let mut unassigned_latencies = Vec::with_capacity(tasks.len());
+		let mut sign_latencies = Vec::with_capacity(tasks.len());
+		let mut write_latencies = Vec::with_capacity(tasks.len());
+		let mut read_latencies = Vec::with_capacity(tasks.len());
+		let mut total_latencies = Vec::with_capacity(tasks.len());
+
+		for (_, info) in tasks.iter() {
+			unassigned_latencies.push(info.unassigned_time().unwrap());
+			sign_latencies.push(info.sign_to_write_duration().unwrap());
+			write_latencies.push(info.write_to_read_duration().unwrap());
+			read_latencies.push(info.read_to_finish_duration().unwrap());
+			total_latencies.push(info.total_execution_duration().unwrap());
+		}
 
 		let average_unassigned_latency =
 			sum_duration(unassigned_latencies.clone()) / unassigned_latencies.len() as u32;
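The latency refactor above collapses five separate `values().map(...).collect()` passes into a single loop, and it only iterates tasks that did not error, so the `unwrap()`s on phase timestamps can no longer panic for failed tasks. A sketch of the single-pass shape with illustrative field names:

```rust
use std::collections::HashMap;
use std::time::Duration;

// Stand-in for TaskPhaseInfo with two of its phase durations.
struct PhaseInfo {
	sign: Duration,
	write: Duration,
}

fn main() {
	let mut tasks: HashMap<u64, PhaseInfo> = HashMap::new();
	tasks.insert(1, PhaseInfo { sign: Duration::from_millis(5), write: Duration::from_millis(20) });
	tasks.insert(2, PhaseInfo { sign: Duration::from_millis(7), write: Duration::from_millis(30) });

	// One pass fills every latency vector instead of re-walking the map
	// once per metric.
	let mut sign_latencies = Vec::with_capacity(tasks.len());
	let mut write_latencies = Vec::with_capacity(tasks.len());
	for info in tasks.values() {
		sign_latencies.push(info.sign);
		write_latencies.push(info.write);
	}

	let avg_sign: Duration = sign_latencies.iter().sum::<Duration>() / sign_latencies.len() as u32;
	assert_eq!(avg_sign, Duration::from_millis(6));
}
```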
@@ -1014,8 +1033,12 @@ impl GmpBenchState {
 		self.tasks.iter().all(|(_, phase)| phase.finish_time.is_some())
 	}
 
-	pub fn get_finished_tasks(&self) -> usize {
-		self.tasks.iter().filter(|(_, phase)| phase.finish_time.is_some()).count()
+	pub fn get_finished_tasks(&self) -> BTreeSet<TaskId> {
+		self.tasks
+			.iter()
+			.filter(|(_, phase)| phase.finish_time.is_some())
+			.map(|(id, _)| *id)
+			.collect::<BTreeSet<TaskId>>()
 	}
 }
@@ -1075,7 +1098,7 @@ impl RecvTaskPhase {
 	}
 }
 
-#[derive(Debug)]
+#[derive(Debug, Clone)]
 pub struct TaskPhaseInfo {
 	pub insert_time: Instant,
 	pub sign_phase_start: Option<Instant>,
diff --git a/tester/src/main.rs b/tester/src/main.rs
index 527fe3652..cf0b88df2 100644
--- a/tester/src/main.rs
+++ b/tester/src/main.rs
@@ -366,15 +366,22 @@ async fn gmp_benchmark(
 				let task_id = task_result.0;
 				let task_payload = task_result.1;
+				match task_payload.payload {
+					Payload::Gmp(msgs) => {
+						bench_state.update_recv_gmp_task(task_id, msgs.len() as u64);
+					},
+					Payload::Error(error) => {
+						bench_state.add_errored_tasks(task_id, error);
+					},
+					_ => {}
+				}
+
 				if bench_state.task_ids().contains(&task_id)
 					|| bench_state.recv_task_ids().contains(&task_id)
 				{
 					bench_state.finish_task(task_id);
 				}
-				if let Payload::Gmp(msgs) = task_payload.payload {
-					bench_state.update_recv_gmp_task(task_id, msgs.len() as u64);
-				};
 			}
 			// update task phase
-			bench_state.sync_phase(src_tester).await;
+			bench_state.sync_phase(dest_tester).await;
 		}
 	}
 	_ = one_min_tick.tick() => {
@@ -413,12 +420,15 @@ async fn gmp_benchmark(
 			cpu_usage.push(average_cpu_usage);
 
 			// verify if the number of tasks finished matches the number of calls or greater and all tasks are finished
-			if bench_state.get_finished_tasks() >= number_of_calls as usize
-				&& bench_state.all_tasks_completed()
-				&& is_contract_updated {
+			if bench_state.get_finished_tasks().len() >= number_of_calls as usize
+				&& bench_state.all_tasks_completed() {
+				println!("all tasks Completed");
+				if !is_contract_updated {
+					println!("Contract was not able to update completely");
+				}
 				break;
 			} else {
-				println!("task_ids: {:?}, completed: {:?}", bench_state.task_ids(), bench_state.get_finished_tasks());
+				println!("task_ids: {:?}:{:?}, completed: {:?}:{:?}", bench_state.task_ids().len(), bench_state.task_ids(), bench_state.get_finished_tasks().len(), bench_state.get_finished_tasks());
 			}
 		}
 		_ = tokio::signal::ctrl_c() => {
diff --git a/tss/src/lib.rs b/tss/src/lib.rs
index 4321fc457..faf4118c6 100644
--- a/tss/src/lib.rs
+++ b/tss/src/lib.rs
@@ -191,7 +191,7 @@ where
 				if let Some(session) = signing_sessions.get_mut(&id) {
 					session.on_message(frost_id, msg);
 				} else {
-					anyhow::bail!("invalid signing session");
+					tracing::info!("invalid signing session {}", id);
 				}
 			},
 			(_, msg) => {
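The first tss hunk downgrades an unknown-session message from a hard `anyhow::bail!` to a log line: a late ROAST message for a session that already finished is expected traffic, not a protocol violation worth aborting message handling for. A std-only sketch of the behavioral difference, with hypothetical types:

```rust
use std::collections::HashMap;

struct Session;

fn on_message(sessions: &mut HashMap<u64, Session>, id: u64) {
	match sessions.get_mut(&id) {
		Some(_session) => { /* forward to session.on_message(..) */ }
		// Before: return an error and abort processing of further messages.
		// After: note the stray message and keep serving live sessions.
		None => println!("invalid signing session {id}"),
	}
}

fn main() {
	let mut sessions = HashMap::new();
	sessions.insert(1, Session);
	on_message(&mut sessions, 1); // known session: handled
	on_message(&mut sessions, 99); // unknown session: logged, not fatal
}
```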
@@ -212,43 +212,39 @@ where
 		}
 	}
 
-	pub fn on_start(&mut self, id: I) {
-		tracing::info!(peer_id = self.peer_id, "start {}", id);
+	fn get_or_insert_session(&mut self, id: I) -> Option<&mut Roast> {
 		match &mut self.state {
 			TssState::Roast {
 				key_package,
 				public_key_package,
 				signing_sessions,
 				..
-			} => {
-				let roast = Roast::new(
+			} => Some(signing_sessions.entry(id).or_insert_with(|| {
+				Roast::new(
 					self.frost_id,
 					self.threshold,
 					key_package.clone(),
 					public_key_package.clone(),
 					self.coordinators.clone(),
-				);
-				signing_sessions.insert(id, roast);
-			},
-			_ => {
-				tracing::error!("not ready to sign");
-			},
+				)
+			})),
+			_ => None,
+		}
+	}
+
+	pub fn on_start(&mut self, id: I) {
+		tracing::info!(peer_id = self.peer_id, "start {}", id);
+		if self.get_or_insert_session(id).is_none() {
+			tracing::error!("not ready to sign");
 		}
 	}
 
 	pub fn on_sign(&mut self, id: I, data: Vec<u8>) {
 		tracing::info!(peer_id = self.peer_id, "sign {}", id);
-		match &mut self.state {
-			TssState::Roast { signing_sessions, .. } => {
-				if let Some(session) = signing_sessions.get_mut(&id) {
-					session.set_data(data);
-				} else {
-					tracing::info!("signing session already complete");
-				}
-			},
-			_ => {
-				tracing::error!("not ready to sign");
-			},
+		if let Some(session) = self.get_or_insert_session(id) {
+			session.set_data(data)
+		} else {
+			tracing::error!("not ready to sign");
 		}
 	}
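The refactor that closes this patch centers on `entry(id).or_insert_with(..)`: session creation becomes idempotent, so `on_start` and `on_sign` can arrive in either order and both land on the same session. A reduced sketch with illustrative `Session`/`Tss` types, not the crate's real ones:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct Session {
	data: Option<Vec<u8>>,
}

struct Tss {
	sessions: HashMap<u64, Session>,
}

impl Tss {
	// Either returns the existing session for `id` or lazily creates one,
	// mirroring the diff's get_or_insert_session.
	fn get_or_insert_session(&mut self, id: u64) -> &mut Session {
		self.sessions.entry(id).or_insert_with(Session::default)
	}

	fn on_start(&mut self, id: u64) {
		self.get_or_insert_session(id);
	}

	fn on_sign(&mut self, id: u64, data: Vec<u8>) {
		self.get_or_insert_session(id).data = Some(data);
	}
}

fn main() {
	let mut tss = Tss { sessions: HashMap::new() };
	tss.on_sign(7, vec![1, 2, 3]); // arrives before on_start — still fine
	tss.on_start(7); // reuses the session instead of clobbering it
	assert_eq!(tss.sessions.len(), 1);
	assert_eq!(tss.sessions[&7].data.as_deref(), Some(&[1u8, 2, 3][..]));
}
```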