diff --git a/.github/actions/dockerfiles/Dockerfile.alpine-binary b/.github/actions/dockerfiles/Dockerfile.alpine-binary index d185121e73..133ed6ada3 100644 --- a/.github/actions/dockerfiles/Dockerfile.alpine-binary +++ b/.github/actions/dockerfiles/Dockerfile.alpine-binary @@ -10,10 +10,9 @@ ARG TARGETVARIANT ARG REPO RUN case ${TARGETPLATFORM} in \ - linux/amd64/v2) BIN_ARCH=linux-glibc-x64-v2 ;; \ - linux/amd64*) BIN_ARCH=linux-glibc-x64 ;; \ - linux/arm64*) BIN_ARCH=linux-glibc-arm64 ;; \ - linux/arm/v7) BIN_ARCH=linux-glibc-armv7 ;; \ + linux/amd64*) BIN_ARCH=linux-musl-x64 ;; \ + linux/arm64*) BIN_ARCH=linux-musl-arm64 ;; \ + linux/arm/v7) BIN_ARCH=linux-musl-armv7 ;; \ *) exit 1 ;; \ esac \ && echo "TARGETPLATFORM: $TARGETPLATFORM" \ diff --git a/.github/actions/dockerfiles/Dockerfile.debian-binary b/.github/actions/dockerfiles/Dockerfile.debian-binary index 757379095c..0dec991d3f 100644 --- a/.github/actions/dockerfiles/Dockerfile.debian-binary +++ b/.github/actions/dockerfiles/Dockerfile.debian-binary @@ -10,7 +10,6 @@ ARG TARGETVARIANT ARG REPO RUN case ${TARGETPLATFORM} in \ - linux/amd64/v2) BIN_ARCH=linux-glibc-x64-v2 ;; \ linux/amd64*) BIN_ARCH=linux-glibc-x64 ;; \ linux/arm64*) BIN_ARCH=linux-glibc-arm64 ;; \ linux/arm/v7) BIN_ARCH=linux-glibc-armv7 ;; \ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index f97a7d6d9b..e32148c06f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -52,19 +52,32 @@ jobs: ###################################################################################### ## Check if the branch that this workflow is being run against is a release branch + ## + ## Outputs: + ## - node_tag: Tag of the stacks-node if the branch is a release one (example: release/3.4.0.0.1), null otherwise + ## - node_docker_tag: Version of the stacks-node if the branch is a release one (example: 3.4.0.0.1), null otherwise + ## - signer_tag: Tag of the stacks-signer if the branch is a release one (example: release/3.4.0.0.1.0), null otherwise + ## - signer_docker_tag: Version of the stacks-signer if the branch is a release one (example: 3.4.0.0.1.0), null otherwise + ## - is_node_release: True if the branch represents a 'stacks-node' release, false otherwise. + ## If this is true, 'is_signer_release' will also be true, since a 'stacks-signer' binary + ## is always released alongside 'stacks-node'. + ## - is_signer_release: True if the branch represents a 'stacks-signer' release, false otherwise. 
check-release: name: Check Release needs: - rustfmt runs-on: ubuntu-latest outputs: - tag: ${{ steps.check_release.outputs.tag }} - docker_tag: ${{ steps.check_release.outputs.docker_tag }} - is_release: ${{ steps.check_release.outputs.is_release }} + node_tag: ${{ steps.check_release.outputs.node_tag }} + node_docker_tag: ${{ steps.check_release.outputs.node_docker_tag }} + signer_tag: ${{ steps.check_release.outputs.signer_tag }} + signer_docker_tag: ${{ steps.check_release.outputs.signer_docker_tag }} + is_node_release: ${{ steps.check_release.outputs.is_node_release }} + is_signer_release: ${{ steps.check_release.outputs.is_signer_release }} steps: - name: Check Release id: check_release - uses: stacks-network/actions/stacks-core/check-release@main + uses: stacks-network/actions/stacks-core/release/check-release@main with: tag: ${{ github.ref_name }} @@ -72,27 +85,33 @@ jobs: ## Create a tagged github release ## ## Runs when: - ## - it is a release run + ## - it is either a node release or a signer release create-release: if: | - needs.check-release.outputs.is_release == 'true' - name: Create Release + needs.check-release.outputs.is_node_release == 'true' || + needs.check-release.outputs.is_signer_release == 'true' + name: Create Release(s) needs: - rustfmt - check-release uses: ./.github/workflows/github-release.yml with: - tag: ${{ needs.check-release.outputs.tag }} - docker_tag: ${{ needs.check-release.outputs.docker_tag }} + node_tag: ${{ needs.check-release.outputs.node_tag }} + node_docker_tag: ${{ needs.check-release.outputs.node_docker_tag }} + signer_tag: ${{ needs.check-release.outputs.signer_tag }} + signer_docker_tag: ${{ needs.check-release.outputs.signer_docker_tag }} + is_node_release: ${{ needs.check-release.outputs.is_node_release }} + is_signer_release: ${{ needs.check-release.outputs.is_signer_release }} secrets: inherit ## Build and push Debian image built from source ## ## Runs when: - ## - it is not a release run + ## - it is not a node or signer-only release run docker-image: if: | - needs.check-release.outputs.is_release != 'true' + needs.check-release.outputs.is_node_release != 'true' || + needs.check-release.outputs.is_signer_release != 'true' name: Docker Image (Source) uses: ./.github/workflows/image-build-source.yml needs: @@ -103,16 +122,14 @@ jobs: ## Create a reusable cache for tests ## ## Runs when: - ## - it is a release run - ## or: - ## - it is not a release run - ## and any of: - ## - this workflow is called manually - ## - PR is opened - ## - commit to either (development, master) branch + ## - it is a node release run + ## or any of: + ## - this workflow is called manually + ## - PR is opened + ## - PR added to merge queue create-cache: if: | - needs.check-release.outputs.is_release == 'true' || + needs.check-release.outputs.is_node_release == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' @@ -125,16 +142,15 @@ jobs: ## Tests to run regularly ## ## Runs when: - ## - it is a release run - ## or: - ## - it is not a release run - ## and any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - ## - commit to either (development, next, master) branch + ## - it is a node or signer-only release run + ## or any of: + ## - this workflow is called manually + ## - PR is opened + ## - PR added to merge queue stacks-core-tests: if: | + needs.check-release.outputs.is_node_release == 'true' || + 
needs.check-release.outputs.is_signer_release == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' @@ -148,16 +164,15 @@ jobs: ## Checks to run on built binaries ## ## Runs when: - ## - it is a release run - ## or: - ## - it is not a release run - ## and any of: - ## - this workflow is called manually - ## - PR is opened - ## - PR added to merge queue - ## - commit to either (development, next, master) branch + ## - it is a node or signer-only release run + ## or any of: + ## - this workflow is called manually + ## - PR is opened + ## - PR added to merge queue stacks-core-build-tests: if: | + needs.check-release.outputs.is_node_release == 'true' || + needs.check-release.outputs.is_signer_release == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' @@ -167,8 +182,17 @@ jobs: - check-release uses: ./.github/workflows/core-build-tests.yml + ## Checks to run on built binaries + ## + ## Runs when: + ## - it is a node release run + ## or any of: + ## - this workflow is called manually + ## - PR is opened + ## - PR added to merge queue bitcoin-tests: if: | + needs.check-release.outputs.is_node_release == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' @@ -181,6 +205,7 @@ jobs: p2p-tests: if: | + needs.check-release.outputs.is_node_release == 'true' || github.event_name == 'workflow_dispatch' || github.event_name == 'pull_request' || github.event_name == 'merge_group' @@ -194,9 +219,9 @@ jobs: ## Test to run on a tagged release ## ## Runs when: - ## - it is a release run + ## - it is a node release run atlas-tests: - if: needs.check-release.outputs.is_release == 'true' + if: needs.check-release.outputs.is_node_release == 'true' name: Atlas Tests needs: - rustfmt @@ -205,7 +230,7 @@ jobs: uses: ./.github/workflows/atlas-tests.yml epoch-tests: - if: needs.check-release.outputs.is_release == 'true' + if: needs.check-release.outputs.is_node_release == 'true' name: Epoch Tests needs: - rustfmt @@ -214,7 +239,7 @@ jobs: uses: ./.github/workflows/epoch-tests.yml slow-tests: - if: needs.check-release.outputs.is_release == 'true' + if: needs.check-release.outputs.is_node_release == 'true' name: Slow Tests needs: - rustfmt diff --git a/.github/workflows/create-source-binary.yml b/.github/workflows/create-source-binary.yml deleted file mode 100644 index 385b30af7d..0000000000 --- a/.github/workflows/create-source-binary.yml +++ /dev/null @@ -1,60 +0,0 @@ -## Github workflow to create multiarch binaries from source - -name: Create Binaries - -on: - workflow_call: - inputs: - tag: - description: "Tag name of this release (x.y.z)" - required: true - type: string - -## change the display name to the tag being built -run-name: ${{ inputs.tag }} - -concurrency: - group: create-binary-${{ github.head_ref || github.ref || github.run_id}} - ## Only cancel in progress if this is for a PR - cancel-in-progress: ${{ github.event_name == 'pull_request' }} - -jobs: - ## Runs when the following is true: - ## - tag is provided - artifact: - if: | - inputs.tag != '' - name: Build Binaries - runs-on: ubuntu-latest - strategy: - ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch - max-parallel: 10 - matrix: - arch: - - linux-musl - - linux-glibc - - macos - - windows - cpu: - - arm64 - - armv7 - - x86-64 ## defaults to x86-64-v3 variant - intel haswell (2013) and 
newer - # - x86-64-v2 ## intel nehalem (2008) and newer - # - x86-64-v3 ## intel haswell (2013) and newer - # - x86-64-v4 ## intel skylake (2017) and newer - exclude: - - arch: windows # excludes windows-arm64 - cpu: arm64 - - arch: windows # excludes windows-armv7 - cpu: armv7 - - arch: macos # excludes macos-armv7 - cpu: armv7 - - steps: - - name: Build Binary (${{ matrix.arch }}_${{ matrix.cpu }}) - id: build_binary - uses: stacks-network/actions/stacks-core/create-source-binary@main - with: - arch: ${{ matrix.arch }} - cpu: ${{ matrix.cpu }} - tag: ${{ inputs.tag }} diff --git a/.github/workflows/github-release.yml b/.github/workflows/github-release.yml index 9d4e18c665..5028d35968 100644 --- a/.github/workflows/github-release.yml +++ b/.github/workflows/github-release.yml @@ -5,12 +5,28 @@ name: Github Release on: workflow_call: inputs: - tag: - description: "Release Tag" + node_tag: + description: "Node Release Tag" required: true type: string - docker_tag: - description: "Docker Release Tag" + node_docker_tag: + description: "Node Docker Release Tag" + required: true + type: string + signer_tag: + description: "Signer Release Tag" + required: true + type: string + signer_docker_tag: + description: "Signer Docker Release Tag" + required: true + type: string + is_node_release: + description: "True if it is a node release" + required: true + type: string + is_signer_release: + description: "True if it is a signer release" required: true type: string secrets: @@ -22,81 +38,107 @@ concurrency: ## Always cancel duplicate jobs cancel-in-progress: true -run-name: ${{ inputs.tag }} +run-name: ${{ inputs.node_tag || inputs.signer_tag }} jobs: ## Build arch dependent binaries from source ## ## Runs when the following is true: - ## - tag is provided + ## - either node or signer tag is provided build-binaries: if: | - inputs.tag != '' + inputs.node_tag != '' || + inputs.signer_tag != '' name: Build Binaries - uses: ./.github/workflows/create-source-binary.yml - with: - tag: ${{ inputs.tag }} - secrets: inherit + runs-on: ubuntu-latest + strategy: + ## Run a maximum of 10 builds concurrently, using the matrix defined in inputs.arch + max-parallel: 10 + matrix: + arch: + - linux-musl + - linux-glibc + - macos + - windows + cpu: + - arm64 + - armv7 + - x86-64 ## defaults to x86-64-v3 variant - intel haswell (2013) and newer + # - x86-64-v2 ## intel nehalem (2008) and newer + # - x86-64-v3 ## intel haswell (2013) and newer + # - x86-64-v4 ## intel skylake (2017) and newer + exclude: + - arch: windows # excludes windows-arm64 + cpu: arm64 + - arch: windows # excludes windows-armv7 + cpu: armv7 + - arch: macos # excludes macos-armv7 + cpu: armv7 + steps: + - name: Build Binary (${{ matrix.arch }}_${{ matrix.cpu }}) + uses: stacks-network/actions/stacks-core/release/create-source-binary@main + with: + arch: ${{ matrix.arch }} + cpu: ${{ matrix.cpu }} + node_tag: ${{ inputs.node_tag }} + signer_tag: ${{ inputs.signer_tag }} + signer_docker_tag: ${{ inputs.signer_docker_tag }} + is_node_release: ${{ inputs.is_node_release }} ## Runs when the following is true: - ## - tag is provided - ## - workflow is building default branch (master) + ## - either node or signer tag is provided create-release: if: | - inputs.tag != '' + inputs.node_tag != '' || + inputs.signer_tag != '' name: Create Release runs-on: ubuntu-latest needs: - build-binaries steps: - ## Downloads the artifacts built in `create-source-binary.yml` - - name: Download Artifacts - id: download_artifacts - uses: 
actions/download-artifact@65a9edc5881444af0b9093a5e628f2fe47ea3b2e # v4.1.7 + ## Creates releases + - name: Create Release + uses: stacks-network/actions/stacks-core/release/create-releases@main with: - pattern: ${{ inputs.tag }}-binary-build-* - path: release - merge-multiple: true - - ## Generate a checksums file to be added to the release page - - name: Generate Checksums - id: generate_checksum - uses: stacks-network/actions/generate-checksum@main - with: - artifact_download_pattern: "${{ inputs.tag }}-binary-build-*" - - ## Upload the release archives with the checksums file - - name: Upload Release - id: upload_release - uses: softprops/action-gh-release@69320dbe05506a9a39fc8ae11030b214ec2d1f87 #v2.0.5 - env: - GITHUB_TOKEN: ${{ secrets.GH_TOKEN }} - with: - name: Release ${{ inputs.tag || github.ref }} - tag_name: ${{ inputs.tag || github.ref }} - draft: false - prerelease: true - fail_on_unmatched_files: true - target_commitish: ${{ github.sha }} - generate_release_notes: true - files: | - release/*.zip - CHECKSUMS.txt + node_tag: ${{ inputs.node_tag }} + node_docker_tag: ${{ inputs.node_docker_tag }} + signer_tag: ${{ inputs.signer_tag }} + signer_docker_tag: ${{ inputs.signer_docker_tag }} + is_node_release: ${{ inputs.is_node_release }} + is_signer_release: ${{ inputs.is_signer_release }} + GH_TOKEN: ${{ secrets.GH_TOKEN }} ## Builds arch dependent Docker images from binaries ## ## Runs when the following is true: - ## - tag is provided - ## - workflow is building default branch (master) + ## - either node or signer tag is provided docker-image: if: | - inputs.tag != '' + inputs.node_tag != '' || + inputs.signer_tag != '' name: Docker Image (Binary) - uses: ./.github/workflows/image-build-binary.yml + runs-on: ubuntu-latest needs: - build-binaries - create-release - with: - tag: ${{ inputs.tag }} - docker_tag: ${{ inputs.docker_tag }} - secrets: inherit + strategy: + fail-fast: false + ## Build a maximum of 2 images concurrently based on matrix.dist + max-parallel: 2 + matrix: + dist: + - alpine + - debian + steps: + - name: Create Docker Image + uses: stacks-network/actions/stacks-core/release/docker-images@main + with: + node_tag: ${{ inputs.node_tag }} + node_docker_tag: ${{ inputs.node_docker_tag }} + signer_tag: ${{ inputs.signer_tag }} + signer_docker_tag: ${{ inputs.signer_docker_tag }} + is_node_release: ${{ inputs.is_node_release }} + is_signer_release: ${{ inputs.is_signer_release }} + DOCKERHUB_USERNAME: ${{ secrets.DOCKERHUB_USERNAME }} + DOCKERHUB_PASSWORD: ${{ secrets.DOCKERHUB_PASSWORD }} + dist: ${{ matrix.dist }} diff --git a/.github/workflows/image-build-binary.yml b/.github/workflows/image-build-binary.yml deleted file mode 100644 index 5966d7e68a..0000000000 --- a/.github/workflows/image-build-binary.yml +++ /dev/null @@ -1,145 +0,0 @@ -## Github workflow to build a multiarch docker image from pre-built binaries - -name: Docker Image (Binary) - -on: - workflow_call: - inputs: - tag: - required: true - type: string - description: "Version tag of release" - docker_tag: - required: true - type: string - description: "Version tag for docker images" - -## Define which docker arch to build for -env: - docker_platforms: "linux/arm64, linux/arm/v7, linux/amd64, linux/amd64/v3" - docker-org: blockstack - -concurrency: - group: docker-image-binary-${{ github.head_ref || github.ref || github.run_id }} - ## Always cancel duplicate jobs - cancel-in-progress: true - -run-name: ${{ inputs.tag }} - -jobs: - ## Runs when the following is true: - ## - tag is provided - ## - 
workflow is building default branch (master) - image: - if: | - inputs.tag != '' - name: Build Image - runs-on: ubuntu-latest - strategy: - fail-fast: false - ## Build a maximum of 2 images concurrently based on matrix.dist - max-parallel: 2 - matrix: - dist: - - alpine - - debian - steps: - ## Setup Docker for the builds - - name: Docker setup - id: docker_setup - uses: stacks-network/actions/docker@main - with: - username: ${{ secrets.DOCKERHUB_USERNAME }} - password: ${{ secrets.DOCKERHUB_PASSWORD }} - - ## if the repo owner is not `stacks-network`, default to a docker-org of the repo owner (i.e. github user id) - ## this allows forks to run the docker push workflows without having to hardcode a dockerhub org (but it does require docker hub user to match github username) - - name: Set Local env vars - id: set_env - if: | - github.repository_owner != 'stacks-network' - run: | - echo "docker-org=${{ github.repository_owner }}" >> "$GITHUB_ENV" - - - name: Check Signer Release - id: check_signer_release - run: | - case "${{ inputs.tag }}" in - signer-*) - echo "is-signer-release=true" >> $GITHUB_ENV - ;; - *) - echo "is-signer-release=false" >> $GITHUB_ENV - ;; - esac - - ## Set docker metatdata - ## - depending on the matrix.dist, different tags will be enabled - ## ex. debian will have this tag: `type=ref,event=tag,enable=${{ matrix.dist == 'debian' }}` - - name: Docker Metadata ( ${{matrix.dist}} ) - if: ${{ env.is-signer-release == 'true' }} - id: docker_metadata_signer - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 - with: - images: | - ${{env.docker-org}}/stacks-signer - tags: | - type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} - type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} - type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} - type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} - type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} - type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} - - - name: Docker Metadata ( ${{matrix.dist}} ) - if: ${{ env.is-signer-release == 'false' }} - id: docker_metadata_node - uses: docker/metadata-action@8e5442c4ef9f78752691e2d8f8d19755c6f78e81 #v5.5.1 - with: - ## tag images with current repo name `stacks-core` as well as legacy `stacks-blockchain` - images: | - ${{env.docker-org}}/${{ github.event.repository.name }} - ${{env.docker-org}}/stacks-blockchain - tags: | - type=raw,value=latest,enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'debian' }} - type=raw,value=${{ inputs.docker_tag }}-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian'}} - type=raw,value=${{ inputs.docker_tag }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'debian' }} - type=ref,event=tag,enable=${{ matrix.dist == 'debian' }} - type=raw,value=latest-${{ matrix.dist }},enable=${{ inputs.docker_tag != '' && (github.ref == format('refs/heads/{0}', github.event.repository.default_branch) ) && matrix.dist == 'alpine' }} - type=raw,value=${{ inputs.docker_tag }}-${{ 
matrix.dist }},enable=${{ inputs.docker_tag != '' && matrix.dist == 'alpine' }} - - ## Build docker image for signer release - - name: Build and Push ( ${{matrix.dist}} ) - if: ${{ env.is-signer-release == 'true' }} - id: docker_build_signer - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 - with: - file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary - platforms: ${{ env.docker_platforms }} - tags: ${{ steps.docker_metadata_signer.outputs.tags }} - labels: ${{ steps.docker_metadata_signer.outputs.labels }} - build-args: | - TAG=${{ inputs.tag }} - REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: ${{ env.DOCKER_PUSH }} - - ## Build docker image for node release - - name: Build and Push ( ${{matrix.dist}} ) - if: ${{ env.is-signer-release == 'false' }} - id: docker_build_node - uses: docker/build-push-action@2cdde995de11925a030ce8070c3d77a52ffcf1c0 # v5.3.0 - with: - file: ./.github/actions/dockerfiles/Dockerfile.${{ matrix.dist }}-binary - platforms: ${{ env.docker_platforms }} - tags: ${{ steps.docker_metadata_node.outputs.tags }} - labels: ${{ steps.docker_metadata_node.outputs.labels }} - build-args: | - TAG=${{ inputs.tag }} - REPO=${{ github.repository_owner }}/${{ github.event.repository.name }} - STACKS_NODE_VERSION=${{ inputs.tag || env.GITHUB_SHA_SHORT }} - GIT_BRANCH=${{ env.GITHUB_REF_SHORT }} - GIT_COMMIT=${{ env.GITHUB_SHA_SHORT }} - push: ${{ env.DOCKER_PUSH }} diff --git a/CHANGELOG.md b/CHANGELOG.md index 0a972edbc1..aae051bfea 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,6 +13,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed +- Miner will stop waiting for signatures on a block if the Stacks tip advances (causing the block it had proposed to be invalid). 
- Logging improvements: - P2P logs now includes a reason for dropping a peer or neighbor - Improvements to how a PeerAddress is logged (human readable format vs hex) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index e1d5662732..487e926cf1 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -207,6 +207,10 @@ impl SignerTrait for Signer { "block_height" => b.header.chain_length, "signer_sighash" => %b.header.signer_signature_hash(), ); + #[cfg(any(test, feature = "testing"))] + if self.test_skip_block_broadcast(b) { + return; + } stacks_client.post_block_until_ok(self, b); } SignerMessage::MockProposal(mock_proposal) => { diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 8f04d08a66..4550e903eb 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -234,10 +234,7 @@ impl BitcoinIndexer { true, false, ) - .expect(&format!( - "Failed to open {:?}", - working_dir_path.to_str().unwrap() - )); + .unwrap_or_else(|_| panic!("Failed to open {working_dir_path:?}")); BitcoinIndexer { config: BitcoinIndexerConfig::default_regtest( diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2b50656df6..b60413b9b0 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -439,10 +439,7 @@ impl TestBurnchainBlock { // prove on the last-ever sortition's hash to produce the new seed let proof = miner .make_proof(&leader_key.public_key, &last_snapshot.sortition_hash) - .expect(&format!( - "FATAL: no private key for {}", - leader_key.public_key.to_hex() - )); + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", leader_key.public_key)); VRFSeed::from_proof(&proof) }); @@ -655,10 +652,12 @@ impl TestBurnchainBlock { let parent_hdr = indexer .read_burnchain_header(self.block_height.saturating_sub(1)) .unwrap() - .expect(&format!( - "BUG: could not read block at height {}", - self.block_height.saturating_sub(1) - )); + .unwrap_or_else(|| { + panic!( + "BUG: could not read block at height {}", + self.block_height.saturating_sub(1) + ) + }); let now = BURNCHAIN_TEST_BLOCK_TIME; let block_hash = BurnchainHeaderHash::from_bitcoin_hash( diff --git a/stackslib/src/chainstate/nakamoto/tests/node.rs b/stackslib/src/chainstate/nakamoto/tests/node.rs index 9304079618..783e07901a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/node.rs +++ b/stackslib/src/chainstate/nakamoto/tests/node.rs @@ -1344,7 +1344,7 @@ impl TestPeer<'_> { ); } Err(e) => { - panic!("Failure fetching recipient set: {:?}", e); + panic!("Failure fetching recipient set: {e:?}"); } }; @@ -1368,16 +1368,11 @@ impl TestPeer<'_> { let proof = self .miner .make_proof(&miner_key.public_key, &tip.sortition_hash) - .expect(&format!( - "FATAL: no private key for {}", - miner_key.public_key.to_hex() - )); + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); self.sortdb = Some(sortdb); debug!( - "VRF proof made from {} over {}: {}", - &miner_key.public_key.to_hex(), - &tip.sortition_hash, - &proof.to_hex() + "VRF proof made from {:?} over {}: {proof:?}", + miner_key.public_key, &tip.sortition_hash ); proof } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index ff5be1d0e5..abf53791ea 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ 
b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -334,10 +334,7 @@ pub fn check_stacking_state_invariants( .burn_header_height; let stacking_state_entry = get_stacking_state_pox(peer, tip, stacker, active_pox_contract) - .expect(&format!( - "Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state (pox_contract = {})", - active_pox_contract, - )) + .unwrap_or_else(|| panic!("Invariant violated: reward-cycle entry has stacker field set, but not present in stacker-state (pox_contract = {active_pox_contract})")) .expect_tuple().unwrap(); let first_cycle = stacking_state_entry .get("first-reward-cycle") diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 392c6b2cd1..70258254e9 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -8353,9 +8353,9 @@ fn test_scenario_three(use_nakamoto: bool) { assert_eq!(amount_locked_actual, amount_locked_expected); // Check Bob signer key - let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()); + let signer_key_expected = Value::buff_from(bob.public_key.to_bytes_compressed()).unwrap(); let signer_key_actual = bob_stack_tx_ok.data_map.get("signer-key").unwrap().clone(); - assert_eq!(signer_key_actual, signer_key_actual); + assert_eq!(signer_key_actual, signer_key_expected); // 5. Check that David can't delegate-stack-stx Eve if delegation expires during lock period let eve_delegate_stx_to_david_err = receipts @@ -10262,7 +10262,7 @@ fn test_scenario_five(use_nakamoto: bool) { for (idx, (stacker, stacker_lock_period)) in davids_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = get_stacker_info_pox_4(&mut peer, &stacker.principal) - .expect(format!("Failed to find stacker {}", idx).as_str()); + .unwrap_or_else(|| panic!("Failed to find stacker {idx}")); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, david.pox_address); assert_eq!(lock_period, *stacker_lock_period); @@ -10271,7 +10271,7 @@ fn test_scenario_five(use_nakamoto: bool) { for (idx, (stacker, stacker_lock_period)) in eves_stackers.iter().enumerate() { let (pox_address, first_reward_cycle, lock_period, _indices) = get_stacker_info_pox_4(&mut peer, &stacker.principal) - .expect(format!("Failed to find stacker {}", idx).as_str()); + .unwrap_or_else(|| panic!("Failed to find stacker {idx}")); assert_eq!(first_reward_cycle, reward_cycle); assert_eq!(pox_address, eve.pox_address); assert_eq!(lock_period, *stacker_lock_period); diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 259e2bf949..f550f8b032 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -609,10 +609,7 @@ impl TestStacksNode { &miner_key.public_key, &burn_block.parent_snapshot.sortition_hash, ) - .expect(&format!( - "FATAL: no private key for {}", - miner_key.public_key.to_hex() - )); + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", miner_key.public_key)); let (builder, parent_block_snapshot_opt) = match parent_stacks_block { None => { diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index a9a03d4861..20851d0144 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1691,40 +1691,25 @@ pub struct NodeConfig { pub stacker_dbs: Vec, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] 
pub enum CostEstimatorName { + #[default] NaivePessimistic, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub enum FeeEstimatorName { + #[default] ScalarFeeRate, FuzzedWeightedMedianFeeRate, } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, Default)] pub enum CostMetricName { + #[default] ProportionDotProduct, } -impl Default for CostEstimatorName { - fn default() -> Self { - CostEstimatorName::NaivePessimistic - } -} - -impl Default for FeeEstimatorName { - fn default() -> Self { - FeeEstimatorName::ScalarFeeRate - } -} - -impl Default for CostMetricName { - fn default() -> Self { - CostMetricName::ProportionDotProduct - } -} - impl CostEstimatorName { fn panic_parse(s: String) -> CostEstimatorName { if &s.to_lowercase() == "naive_pessimistic" { diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 2b16a4ac06..94942e0c35 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -52,21 +52,12 @@ use crate::net::{ use crate::util_lib::db::{DBConn, Error as db_error}; // did we or did we not successfully send a message? -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub struct NeighborHealthPoint { pub success: bool, pub time: u64, } -impl Default for NeighborHealthPoint { - fn default() -> NeighborHealthPoint { - NeighborHealthPoint { - success: false, - time: 0, - } - } -} - pub const NUM_HEALTH_POINTS: usize = 32; pub const HEALTH_POINT_LIFETIME: u64 = 12 * 3600; // 12 hours diff --git a/stackslib/src/net/download/nakamoto/tenure.rs b/stackslib/src/net/download/nakamoto/tenure.rs index 0f4e3d53cb..a950e1a732 100644 --- a/stackslib/src/net/download/nakamoto/tenure.rs +++ b/stackslib/src/net/download/nakamoto/tenure.rs @@ -345,10 +345,12 @@ impl TenureStartEnd { rc, pox_constants .block_height_to_reward_cycle(first_burn_height, wt_start.burn_height) - .expect(&format!( - "FATAL: tenure from before system start ({} <= {})", - wt_start.burn_height, first_burn_height - )), + .unwrap_or_else(|| { + panic!( + "FATAL: tenure from before system start ({} <= {first_burn_height})", + wt_start.burn_height + ) + }), wt.processed, ); tenure_start_end.fetch_end_block = true; diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index a7e96a1912..fe0f6a72ff 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1014,10 +1014,9 @@ impl StacksHttp { pub fn set_response_handler(&mut self, request_verb: &str, request_path: &str) { let handler_index = self .find_response_handler(request_verb, request_path) - .expect(&format!( - "FATAL: could not find handler for '{}' '{}'", - request_verb, request_path - )); + .unwrap_or_else(|| { + panic!("FATAL: could not find handler for '{request_verb}' '{request_path}'") + }); self.request_handler_index = Some(handler_index); } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 762f169e0c..9426fb748b 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -4409,18 +4409,15 @@ pub mod test { let mut stacks_node = self.stacks_node.take().unwrap(); let parent_block_opt = stacks_node.get_last_anchored_block(&self.miner); - let parent_sortition_opt = match parent_block_opt.as_ref() { - Some(parent_block) => { - let ic = sortdb.index_conn(); - SortitionDB::get_block_snapshot_for_winning_stacks_block( - &ic, - &tip.sortition_id, - &parent_block.block_hash(), - ) - .unwrap() - } - None => None, - }; + let parent_sortition_opt = parent_block_opt.as_ref().and_then(|parent_block| { + let ic = sortdb.index_conn(); + 
SortitionDB::get_block_snapshot_for_winning_stacks_block( + &ic, + &tip.sortition_id, + &parent_block.block_hash(), + ) + .unwrap() + }); let parent_microblock_header_opt = get_last_microblock_header(&stacks_node, &self.miner, parent_block_opt.as_ref()); @@ -4436,10 +4433,7 @@ pub mod test { &last_key.public_key, &burn_block.parent_snapshot.sortition_hash, ) - .expect(&format!( - "FATAL: no private key for {}", - last_key.public_key.to_hex() - )); + .unwrap_or_else(|| panic!("FATAL: no private key for {:?}", last_key.public_key)); let (stacks_block, microblocks) = tenure_builder( &mut self.miner, @@ -4699,10 +4693,9 @@ pub mod test { self.config .burnchain .block_height_to_reward_cycle(block_height) - .expect(&format!( - "Failed to get reward cycle for block height {}", - block_height - )) + .unwrap_or_else(|| { + panic!("Failed to get reward cycle for block height {block_height}") + }) } /// Verify that the sortition DB migration into Nakamoto worked correctly. diff --git a/stackslib/src/net/stackerdb/tests/config.rs b/stackslib/src/net/stackerdb/tests/config.rs index c099b20cad..5f32123b21 100644 --- a/stackslib/src/net/stackerdb/tests/config.rs +++ b/stackslib/src/net/stackerdb/tests/config.rs @@ -525,25 +525,24 @@ fn test_valid_and_invalid_stackerdb_configs() { ) .unwrap() .into(), - ContractName::try_from(format!("test-{}", i)).unwrap(), + ContractName::try_from(format!("test-{i}")).unwrap(), ); peer.with_db_state(|sortdb, chainstate, _, _| { match StackerDBConfig::from_smart_contract(chainstate, sortdb, &contract_id, 32, None) { Ok(config) => { let expected = result .clone() - .expect(&format!("FATAL: parsed a bad contract\n{}", code)); + .unwrap_or_else(|| panic!("FATAL: parsed a bad contract\n{code}")); assert_eq!(config, expected); } Err(net_error::InvalidStackerDBContract(..)) => { assert!( result.is_none(), - "FATAL: valid contract treated as invalid\n{}", - code + "FATAL: valid contract treated as invalid\n{code}" ); } Err(e) => { - panic!("Unexpected error: {:?}", &e); + panic!("Unexpected error: {e:?}"); } } Ok(()) diff --git a/stackslib/src/net/tests/download/nakamoto.rs b/stackslib/src/net/tests/download/nakamoto.rs index 0577ef3019..236e396d0b 100644 --- a/stackslib/src/net/tests/download/nakamoto.rs +++ b/stackslib/src/net/tests/download/nakamoto.rs @@ -1221,11 +1221,7 @@ fn test_tenure_start_end_from_inventory() { // no tenure here assert!( tenure_start_end_opt.is_none(), - "{}", - format!( - "tenure_start_end = {:?}, rc = {}, i = {}, wt = {:?}", - &tenure_start_end_opt, rc, i, &wt - ) + "tenure_start_end = {tenure_start_end_opt:?}, rc = {rc}, i = {i}, wt = {wt:?}" ); } } @@ -1262,7 +1258,7 @@ fn test_tenure_start_end_from_inventory() { let tenure_start_end_opt = available.get(&wt.tenure_id_consensus_hash); if bits .get(i as u16) - .expect(&format!("failed to get bit {}: {:?}", i, &wt)) + .unwrap_or_else(|| panic!("failed to get bit {i}: {wt:?}")) { // this sortition had a tenure let mut j = (i + 1) as u16; @@ -1288,13 +1284,11 @@ fn test_tenure_start_end_from_inventory() { if tenure_start_index.is_some() && tenure_end_index.is_some() { debug!( - "rc = {}, i = {}, tenure_start_index = {:?}, tenure_end_index = {:?}", - rc, i, &tenure_start_index, &tenure_end_index + "rc = {rc}, i = {i}, tenure_start_index = {tenure_start_index:?}, tenure_end_index = {tenure_end_index:?}" ); - let tenure_start_end = tenure_start_end_opt.expect(&format!( - "failed to get tenure_start_end_opt: i = {}, wt = {:?}", - i, &wt - )); + let tenure_start_end = tenure_start_end_opt.unwrap_or_else(|| 
{ + panic!("failed to get tenure_start_end_opt: i = {i}, wt = {wt:?}") + }); assert_eq!( all_tenures[tenure_start_index.unwrap() as usize].winning_block_id, tenure_start_end.start_block_id @@ -1310,11 +1304,7 @@ fn test_tenure_start_end_from_inventory() { // no tenure here assert!( tenure_start_end_opt.is_none(), - "{}", - format!( - "tenure_start_end = {:?}, rc = {}, i = {}, wt = {:?}", - &tenure_start_end_opt, rc, i, &wt - ) + "tenure_start_end = {tenure_start_end_opt:?}, rc = {rc}, i = {i}, wt = {wt:?}" ); } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index 16b33ead7a..4c5324411e 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -65,11 +65,20 @@ use crate::run_loop::RegisteredKey; pub static TEST_MINE_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] /// Test flag to stall block proposal broadcasting -pub static TEST_BROADCAST_STALL: LazyLock> = LazyLock::new(TestFlag::default); +pub static TEST_BROADCAST_PROPOSAL_STALL: LazyLock> = + LazyLock::new(TestFlag::default); #[cfg(test)] +// Test flag to stall the miner from announcing a block while this flag is true pub static TEST_BLOCK_ANNOUNCE_STALL: LazyLock> = LazyLock::new(TestFlag::default); #[cfg(test)] -pub static TEST_SKIP_P2P_BROADCAST: LazyLock> = LazyLock::new(TestFlag::default); +// Test flag to skip broadcasting blocks over the p2p network +pub static TEST_P2P_BROADCAST_SKIP: LazyLock> = LazyLock::new(TestFlag::default); +#[cfg(test)] +// Test flag to stall broadcasting blocks over the p2p network +pub static TEST_P2P_BROADCAST_STALL: LazyLock> = LazyLock::new(TestFlag::default); +#[cfg(test)] +// Test flag to skip pushing blocks to the signers +pub static TEST_BLOCK_PUSH_SKIP: LazyLock> = LazyLock::new(TestFlag::default); /// If the miner was interrupted while mining a block, how long should the /// miner thread sleep before trying again? @@ -252,19 +261,19 @@ impl BlockMinerThread { } #[cfg(test)] - fn fault_injection_block_broadcast_stall(new_block: &NakamotoBlock) { - if TEST_BROADCAST_STALL.get() { + fn fault_injection_block_proposal_stall(new_block: &NakamotoBlock) { + if TEST_BROADCAST_PROPOSAL_STALL.get() { // Do an extra check just so we don't log EVERY time. 
- warn!("Fault injection: Broadcasting is stalled due to testing directive."; + warn!("Fault injection: Block proposal broadcast is stalled due to testing directive."; "stacks_block_id" => %new_block.block_id(), "stacks_block_hash" => %new_block.header.block_hash(), "height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash ); - while TEST_BROADCAST_STALL.get() { + while TEST_BROADCAST_PROPOSAL_STALL.get() { std::thread::sleep(std::time::Duration::from_millis(10)); } - info!("Fault injection: Broadcasting is no longer stalled due to testing directive."; + info!("Fault injection: Block proposal broadcast is no longer stalled due to testing directive."; "block_id" => %new_block.block_id(), "height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash @@ -273,7 +282,7 @@ impl BlockMinerThread { } #[cfg(not(test))] - fn fault_injection_block_broadcast_stall(_ignored: &NakamotoBlock) {} + fn fault_injection_block_proposal_stall(_ignored: &NakamotoBlock) {} #[cfg(test)] fn fault_injection_block_announce_stall(new_block: &NakamotoBlock) { @@ -301,10 +310,7 @@ impl BlockMinerThread { #[cfg(test)] fn fault_injection_skip_block_broadcast() -> bool { - if TEST_SKIP_P2P_BROADCAST.get() { - return true; - } - false + TEST_P2P_BROADCAST_SKIP.get() } #[cfg(not(test))] @@ -312,6 +318,40 @@ impl BlockMinerThread { false } + #[cfg(test)] + fn fault_injection_block_broadcast_stall(new_block: &NakamotoBlock) { + if TEST_P2P_BROADCAST_STALL.get() { + // Do an extra check just so we don't log EVERY time. + warn!("Fault injection: P2P block broadcast is stalled due to testing directive."; + "stacks_block_id" => %new_block.block_id(), + "stacks_block_hash" => %new_block.header.block_hash(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + while TEST_P2P_BROADCAST_STALL.get() { + std::thread::sleep(std::time::Duration::from_millis(10)); + } + info!("Fault injection: P2P block broadcast is no longer stalled due to testing directive."; + "block_id" => %new_block.block_id(), + "height" => new_block.header.chain_length, + "consensus_hash" => %new_block.header.consensus_hash + ); + } + } + + #[cfg(not(test))] + fn fault_injection_block_broadcast_stall(_ignored: &NakamotoBlock) {} + + #[cfg(test)] + fn fault_injection_skip_block_push() -> bool { + TEST_BLOCK_PUSH_SKIP.get() + } + + #[cfg(not(test))] + fn fault_injection_skip_block_push() -> bool { + false + } + /// Stop a miner tenure by blocking the miner and then joining the tenure thread #[cfg(test)] fn fault_injection_stall_miner() { @@ -516,7 +556,7 @@ impl BlockMinerThread { }; if let Some(mut new_block) = new_block { - Self::fault_injection_block_broadcast_stall(&new_block); + Self::fault_injection_block_proposal_stall(&new_block); let signer_signature = match self.propose_block( coordinator, @@ -532,7 +572,7 @@ impl BlockMinerThread { "block_height" => new_block.header.chain_length, "consensus_hash" => %new_block.header.consensus_hash, ); - return Err(e); + return Ok(()); } NakamotoNodeError::BurnchainTipChanged => { info!("Burnchain tip changed while waiting for signatures"; @@ -739,6 +779,7 @@ impl BlockMinerThread { ); return Ok(()); } + Self::fault_injection_block_broadcast_stall(block); let parent_block_info = NakamotoChainState::get_block_header(chain_state.db(), &block.header.parent_block_id)? 
@@ -834,6 +875,14 @@ impl BlockMinerThread { let miners_contract_id = boot_code_id(MINERS_NAME, chain_state.mainnet); let mut miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + if Self::fault_injection_skip_block_push() { + warn!( + "Fault injection: Skipping block push for {}", + block.block_id() + ); + return Ok(()); + } + SignerCoordinator::send_miners_message( miner_privkey, &sort_db, diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 2138b7e767..0dcbfa04ad 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -288,6 +288,7 @@ impl SignerCoordinator { self.get_block_status( &block.header.signer_signature_hash(), &block.block_id(), + block.header.parent_block_id, chain_state, sortdb, counters, @@ -303,6 +304,7 @@ impl SignerCoordinator { &self, block_signer_sighash: &Sha512Trunc256Sum, block_id: &StacksBlockId, + parent_block_id: StacksBlockId, chain_state: &mut StacksChainState, sortdb: &SortitionDB, counters: &Counters, @@ -319,6 +321,10 @@ impl SignerCoordinator { ) })?; + let parent_tenure_header = + NakamotoChainState::get_block_header(chain_state.db(), &parent_block_id)? + .ok_or(NakamotoNodeError::UnexpectedChainState)?; + // this is used to track the start of the waiting cycle let rejections_timer = Instant::now(); loop { @@ -384,6 +390,18 @@ impl SignerCoordinator { )); } + // Check if a new Stacks block has arrived in the parent tenure + let highest_in_tenure = + NakamotoChainState::get_highest_known_block_header_in_tenure( + &mut chain_state.index_conn(), + &parent_tenure_header.consensus_hash, + )? + .ok_or(NakamotoNodeError::UnexpectedChainState)?; + if highest_in_tenure.index_block_hash() != parent_block_id { + debug!("SignCoordinator: Exiting due to new stacks tip"); + return Err(NakamotoNodeError::StacksTipChanged); + } + continue; } }; diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3070b6610c..dc2723a3e9 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -98,7 +98,8 @@ use stacks_signer::v0::SpawnedSigner; use super::bitcoin_regtest::BitcoinCoreController; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, TEST_SKIP_P2P_BROADCAST, + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_PROPOSAL_STALL, TEST_MINE_STALL, + TEST_P2P_BROADCAST_SKIP, }; use crate::nakamoto_node::relayer::{RelayerThread, TEST_MINER_THREAD_STALL}; use crate::neon::{Counters, RunLoopCounter}; @@ -5188,7 +5189,7 @@ fn forked_tenure_is_ignored() { // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted. // Stall the miner thread; only wait until the number of submitted commits increases. - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); TEST_BLOCK_ANNOUNCE_STALL.set(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -5207,7 +5208,7 @@ fn forked_tenure_is_ignored() { // Unpause the broadcast of Tenure B's block, do not submit commits, and do not allow blocks to // be processed test_skip_commit_op.set(true); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); // Wait for a stacks block to be broadcasted. // However, it will not be processed. 
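The `signer_coordinator.rs` hunk above is what the CHANGELOG entry describes: while waiting for signatures, the coordinator now also watches the proposed block's parent tenure and returns `StacksTipChanged` once that tenure's highest known block is no longer the parent of the block being signed. Below is a minimal sketch of that staleness check using the same chainstate calls as the diff; `proposal_parent_is_stale` is a hypothetical helper name, the import paths are assumed to match those used elsewhere in `testnet/stacks-node`, and the error handling is simplified to `Box<dyn Error>` rather than the node's `NakamotoNodeError`.

use stacks::chainstate::nakamoto::NakamotoChainState;
use stacks::chainstate::stacks::db::StacksChainState;
use stacks_common::types::chainstate::StacksBlockId;

/// Returns true when the parent tenure has advanced past `parent_block_id`,
/// i.e. the block currently awaiting signatures builds on a stale tip.
fn proposal_parent_is_stale(
    chain_state: &mut StacksChainState,
    parent_block_id: &StacksBlockId,
) -> Result<bool, Box<dyn std::error::Error>> {
    // Resolve the tenure (consensus hash) that the proposed block's parent belongs to.
    let parent_header = NakamotoChainState::get_block_header(chain_state.db(), parent_block_id)?
        .ok_or("parent block header not found")?;

    // Highest block currently known in that tenure.
    let highest_in_tenure = NakamotoChainState::get_highest_known_block_header_in_tenure(
        &mut chain_state.index_conn(),
        &parent_header.consensus_hash,
    )?
    .ok_or("no block known in the parent tenure")?;

    // If the tenure's highest block is no longer our parent, the Stacks tip has
    // advanced underneath us; the real coordinator exits the wait loop here.
    Ok(highest_in_tenure.index_block_hash() != *parent_block_id)
}

In the PR itself this check runs inside `get_block_status`'s waiting loop, so the miner abandons the in-flight proposal and re-proposes on top of the new tip instead of blocking until timeout.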
@@ -9881,7 +9882,7 @@ fn skip_mining_long_tx() { }) .unwrap(); - TEST_SKIP_P2P_BROADCAST.set(true); + TEST_P2P_BROADCAST_SKIP.set(true); let tx = make_contract_publish( &sender_2_sk, 0, @@ -9908,7 +9909,7 @@ fn skip_mining_long_tx() { }) .unwrap(); - TEST_SKIP_P2P_BROADCAST.set(false); + TEST_P2P_BROADCAST_SKIP.set(false); } else { let transfer_tx = make_stacks_transfer( &sender_1_sk, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index bc30a105d5..7388349ca6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -36,6 +36,7 @@ use stacks::chainstate::nakamoto::{NakamotoBlock, NakamotoBlockHeader, NakamotoC use stacks::chainstate::stacks::address::PoxAddress; use stacks::chainstate::stacks::boot::MINERS_NAME; use stacks::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; +use stacks::chainstate::stacks::miner::{TransactionEvent, TransactionSuccessEvent}; use stacks::chainstate::stacks::{StacksTransaction, TenureChangeCause, TransactionPayload}; use stacks::codec::StacksMessageCodec; use stacks::config::{EventKeyType, EventObserverConfig}; @@ -74,7 +75,8 @@ use tracing_subscriber::{fmt, EnvFilter}; use super::SignerTest; use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEMENT}; use crate::nakamoto_node::miner::{ - TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, + TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_PROPOSAL_STALL, TEST_MINE_STALL, + TEST_P2P_BROADCAST_STALL, }; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; @@ -589,7 +591,7 @@ fn miner_gather_signatures() { // Disable p2p broadcast of the nakamoto blocks, so that we rely // on the signer's using StackerDB to get pushed blocks - nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST.set(true); + nakamoto_node::miner::TEST_P2P_BROADCAST_SKIP.set(true); info!("------------------------- Test Setup -------------------------"); let num_signers = 5; @@ -1020,7 +1022,7 @@ fn forked_tenure_testing( .unwrap(); // For the next tenure, submit the commit op but do not allow any stacks blocks to be broadcasted - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); TEST_BLOCK_ANNOUNCE_STALL.set(true); let blocks_before = mined_blocks.load(Ordering::SeqCst); @@ -1045,7 +1047,7 @@ fn forked_tenure_testing( .running_nodes .nakamoto_test_skip_commit_op .set(true); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); // Wait for a stacks block to be broadcasted let start_time = Instant::now(); @@ -1965,7 +1967,7 @@ fn miner_forking() { info!("------------------------- RL1 Wins Sortition -------------------------"); info!("Pausing stacks block proposal to force an empty tenure commit from RL2"); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); let burn_height_before = get_burn_height(); @@ -2028,7 +2030,7 @@ fn miner_forking() { // unblock block mining let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); // Wait for the block to be broadcasted and processed wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) @@ -2115,7 +2117,7 @@ fn miner_forking() { info!("------------------------- RL1 RBFs its Own Commit -------------------------"); info!("Pausing stacks block proposal to test RBF capability"); 
- TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); info!("Unpausing commits from RL1"); @@ -2153,7 +2155,7 @@ fn miner_forking() { let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst); // unblock block mining let blocks_len = test_observer::get_blocks().len(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); // Wait for the block to be broadcasted and processed wait_for(30, || Ok(test_observer::get_blocks().len() > blocks_len)) @@ -3350,7 +3352,7 @@ fn empty_sortition() { signer_test.boot_to_epoch_3(); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); info!("------------------------- Test Mine Regular Tenure A -------------------------"); let commits_before = signer_test @@ -3397,7 +3399,7 @@ fn empty_sortition() { .unwrap(); info!("Pausing stacks block proposal to force an empty tenure"); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); info!("Pausing commit op to prevent tenure C from starting..."); signer_test @@ -3430,7 +3432,7 @@ fn empty_sortition() { std::thread::sleep(block_proposal_timeout.add(Duration::from_secs(1))); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); info!("------------------------- Test Delayed Block is Rejected -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); @@ -4562,7 +4564,7 @@ fn duplicate_signers() { // Disable p2p broadcast of the nakamoto blocks, so that we rely // on the signer's using StackerDB to get pushed blocks - nakamoto_node::miner::TEST_SKIP_P2P_BROADCAST.set(true); + nakamoto_node::miner::TEST_P2P_BROADCAST_SKIP.set(true); info!("------------------------- Test Setup -------------------------"); let num_signers = 5; @@ -11712,7 +11714,7 @@ fn reorg_attempts_activity_timeout_exceeded() { let block_proposal_n = wait_for_block_proposal().expect("Failed to get block proposal N"); let chain_after = get_chain_info(&signer_test.running_nodes.conf); assert_eq!(chain_after, chain_before); - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); info!("------------------------- Start Tenure B -------------------------"); let commits_before = signer_test @@ -11750,7 +11752,7 @@ fn reorg_attempts_activity_timeout_exceeded() { // Make sure to wait the reorg_attempts_activity_timeout AFTER the block is globally signed over // as this is the point where signers start considering from. 
std::thread::sleep(reorg_attempts_activity_timeout.add(Duration::from_secs(1))); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); let block_proposal_n_prime = wait_for_block_proposal().expect("Failed to get block proposal N'"); assert_eq!( @@ -11758,7 +11760,7 @@ fn reorg_attempts_activity_timeout_exceeded() { chain_after.stacks_tip_height ); // Make sure that no subsequent proposal arrives before the block_proposal_timeout is exceeded - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); TEST_VALIDATE_STALL.set(false); // We only need to wait the difference between the two timeouts now since we already slept for a min of reorg_attempts_activity_timeout + 1 std::thread::sleep(block_proposal_timeout.saturating_sub(reorg_attempts_activity_timeout)); @@ -11775,7 +11777,7 @@ fn reorg_attempts_activity_timeout_exceeded() { info!("------------------------- Wait for Block N+1 Proposal -------------------------"); test_observer::clear(); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); wait_for(30, || { let block_proposal_n_1 = wait_for_block_proposal().expect("Failed to get block proposal N+1"); @@ -12344,7 +12346,7 @@ fn block_proposal_timeout() { signer_test.boot_to_epoch_3(); // Pause the miner's block proposals - TEST_BROADCAST_STALL.set(true); + TEST_BROADCAST_PROPOSAL_STALL.set(true); let wait_for_block_proposal = || { let mut block_proposal = None; @@ -12389,7 +12391,7 @@ fn block_proposal_timeout() { test_observer::clear(); info!("------------------------- Attempt Mine Block N -------------------------"); - TEST_BROADCAST_STALL.set(false); + TEST_BROADCAST_PROPOSAL_STALL.set(false); let block_proposal_n = wait_for_block_proposal().expect("Failed to get block proposal N"); @@ -13063,3 +13065,438 @@ fn tenure_extend_cost_threshold() { signer_test.shutdown(); } + +#[test] +#[ignore] +/// Test that a miner that begins mining before seeing the last block of the +/// previous tenure can be interrupted when its tip advances to the last block, +/// then successfully mine a block on top of that block. +/// +/// Test Setup: +/// The test spins up five stacks signers, one miner Nakamoto node, and a corresponding bitcoind. +/// The stacks node is then advanced to Epoch 3.0 boundary to allow block signing. +/// +/// Test Execution: +/// Miner 1 mines a tenure change block, then mines a second block, block N, +/// but the signers will not broadcast it, and the miner will stall before +/// broadcasting. Miner 2 wins the next sortition and proposes a block N', +/// since it has not seen N, but signers are ignoring proposals so that it is +/// not rejected. Miner 1 then announces N. Miner 2 sees N, stops waiting +/// for signatures on N' and submits a new proposal, N+1, which is accepted. +/// Finally a new tenure arrives and N+2 is mined. 
+/// +/// Test Assertion: +/// Stacks tip advances to N+1' +fn interrupt_miner_on_new_stacks_tip() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); + + let btc_miner_1_seed = vec![1, 1, 1, 1]; + let btc_miner_2_seed = vec![2, 2, 2, 2]; + let btc_miner_1_pk = Keychain::default(btc_miner_1_seed.clone()).get_pub_key(); + let btc_miner_2_pk = Keychain::default(btc_miner_2_seed.clone()).get_pub_key(); + + let node_1_rpc = gen_random_port(); + let node_1_p2p = gen_random_port(); + let node_2_rpc = gen_random_port(); + let node_2_p2p = gen_random_port(); + + let localhost = "127.0.0.1"; + let node_1_rpc_bind = format!("{localhost}:{node_1_rpc}"); + let node_2_rpc_bind = format!("{localhost}:{node_2_rpc}"); + let mut node_2_listeners = Vec::new(); + + // partition the signer set so that ~half are listening and using node 1 for RPC and events, + // and the rest are using node 2 + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, (send_amt + send_fee) * 2)], + |signer_config| { + let node_host = if signer_config.endpoint.port() % 2 == 0 { + &node_1_rpc_bind + } else { + &node_2_rpc_bind + }; + signer_config.node_host = node_host.to_string(); + // we're deliberately stalling proposals: don't punish this in this test! + signer_config.block_proposal_timeout = Duration::from_secs(240); + // make sure that we don't allow forking due to burn block timing + signer_config.first_proposal_burn_block_timing = Duration::from_secs(60); + }, + |config| { + config.node.rpc_bind = format!("{localhost}:{node_1_rpc}"); + config.node.p2p_bind = format!("{localhost}:{node_1_p2p}"); + config.node.data_url = format!("http://{localhost}:{node_1_rpc}"); + config.node.p2p_address = format!("{localhost}:{node_1_p2p}"); + + config.node.seed = btc_miner_1_seed.clone(); + config.node.local_peer_seed = btc_miner_1_seed.clone(); + config.burnchain.local_mining_public_key = Some(btc_miner_1_pk.to_hex()); + config.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[1])); + config.node.pox_sync_sample_secs = 30; + config.miner.block_commit_delay = Duration::from_secs(0); + config.miner.tenure_cost_limit_per_block_percentage = None; + config.miner.block_rejection_timeout_steps = [(0, Duration::from_secs(1200))].into(); + + config.events_observers.retain(|listener| { + match std::net::SocketAddr::from_str(&listener.endpoint) { + Ok(addr) => { + if addr.port() % 2 == 0 && addr.port() != test_observer::EVENT_OBSERVER_PORT { + return true; + } + + node_2_listeners.push(listener.clone()); + addr.port() == test_observer::EVENT_OBSERVER_PORT + } + Err(_) => { + warn!( + "Cannot parse {} to a socket, assuming it isn't a signer-listener binding", + listener.endpoint + ); + true + } + } + }) + }, + Some(vec![btc_miner_1_pk, btc_miner_2_pk]), + None, + ); + let conf = signer_test.running_nodes.conf.clone(); + let mut conf_node_2 = conf.clone(); + conf_node_2.node.rpc_bind = node_2_rpc_bind; + conf_node_2.node.p2p_bind = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.data_url = format!("http://{localhost}:{node_2_rpc}"); + conf_node_2.node.p2p_address = format!("{localhost}:{node_2_p2p}"); + conf_node_2.node.seed = btc_miner_2_seed.clone(); 
+    conf_node_2.burnchain.local_mining_public_key = Some(btc_miner_2_pk.to_hex());
+    conf_node_2.node.local_peer_seed = btc_miner_2_seed;
+    conf_node_2.node.miner = true;
+    conf_node_2.events_observers.clear();
+    conf_node_2.events_observers.extend(node_2_listeners);
+    conf_node_2.miner.mining_key = Some(Secp256k1PrivateKey::from_seed(&[2]));
+    assert!(!conf_node_2.events_observers.is_empty());
+
+    let node_1_sk = Secp256k1PrivateKey::from_seed(&conf.node.local_peer_seed);
+    let node_1_pk = StacksPublicKey::from_private(&node_1_sk);
+
+    // Give node 2 its own working directory so the two nodes don't share chain state.
+    conf_node_2.node.working_dir = format!("{}-1", conf_node_2.node.working_dir);
+
+    conf_node_2.node.set_bootstrap_nodes(
+        format!("{}@{}", &node_1_pk.to_hex(), conf.node.p2p_bind),
+        conf.burnchain.chain_id,
+        conf.burnchain.peer_version,
+    );
+
+    let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap();
+    let Counters {
+        naka_skip_commit_op: skip_commit_op_rl2,
+        naka_submitted_commits: commits_submitted_rl2,
+        naka_submitted_commit_last_burn_height: commits_submitted_rl2_last_burn_height,
+        naka_proposed_blocks: proposed_blocks_rl2,
+        ..
+    } = run_loop_2.counters();
+    let _run_loop_2_thread = thread::Builder::new()
+        .name("run_loop_2".into())
+        .spawn(move || run_loop_2.start(None, 0))
+        .unwrap();
+
+    let all_signers: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect();
+    let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind);
+
+    signer_test.boot_to_epoch_3();
+
+    wait_for(120, || {
+        let Some(node_1_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        let Some(node_2_info) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(node_1_info.stacks_tip_height == node_2_info.stacks_tip_height)
+    })
+    .expect("Timed out waiting for bootstrapped node to catch up to the miner");
+
+    let commits_submitted_rl1 = signer_test.running_nodes.commits_submitted.clone();
+    let commits_submitted_rl1_last_burn_height =
+        signer_test.running_nodes.last_commit_burn_height.clone();
+    let skip_commit_op_rl1 = signer_test
+        .running_nodes
+        .nakamoto_test_skip_commit_op
+        .clone();
+
+    let mining_pk_1 = StacksPublicKey::from_private(&conf.miner.mining_key.unwrap());
+    let mining_pk_2 = StacksPublicKey::from_private(&conf_node_2.miner.mining_key.unwrap());
+    let mining_pkh_1 = Hash160::from_node_public_key(&mining_pk_1);
+    let mining_pkh_2 = Hash160::from_node_public_key(&mining_pk_2);
+    debug!("The mining key for miner 1 is {mining_pkh_1}");
+    debug!("The mining key for miner 2 is {mining_pkh_2}");
+
+    let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap();
+    let get_burn_height = || {
+        SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
+            .unwrap()
+            .block_height
+    };
+
+    let wait_for_chains = || {
+        wait_for(30, || {
+            let Some(chain_info_1) = get_chain_info_opt(&conf) else {
+                return Ok(false);
+            };
+            let Some(chain_info_2) = get_chain_info_opt(&conf_node_2) else {
+                return Ok(false);
+            };
+            Ok(chain_info_1.burn_block_height == chain_info_2.burn_block_height)
+        })
+    };
+    info!("------------------------- Reached Epoch 3.0 -------------------------");
+
+    info!("Pausing both miners' block commit submissions");
+    skip_commit_op_rl1.set(true);
+    skip_commit_op_rl2.set(true);
+
+    info!("Flushing any pending commits to enable custom winner selection");
+    let burn_height_before = get_burn_height();
+    let blocks_before = test_observer::get_blocks().len();
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        30,
+        || {
+            Ok(get_burn_height() > burn_height_before
+                && test_observer::get_blocks().len() > blocks_before)
+        },
+    )
+    .unwrap();
+
+    info!("------------------------- RL1 Wins Sortition -------------------------");
+    let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst);
+    let burn_height_before = get_burn_height();
+
+    info!("Unpausing commits from RL1");
+    skip_commit_op_rl1.set(false);
+
+    info!("Waiting for commits from RL1");
+    wait_for(30, || {
+        Ok(
+            commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before
+                && commits_submitted_rl1_last_burn_height.load(Ordering::SeqCst)
+                    >= burn_height_before,
+        )
+    })
+    .expect("Timed out waiting for miner 1 to submit a commit op");
+
+    info!("Pausing commits from RL1");
+    skip_commit_op_rl1.set(true);
+
+    let burn_height_before = get_burn_height();
+    info!("Mine RL1 Tenure");
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        30,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+    let burn_height_after = get_burn_height();
+
+    wait_for_chains().expect("Timed out waiting for RL1 and RL2 chains to advance");
+    let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    // make sure the tenure was won by RL1
+    assert!(tip.sortition, "No sortition was won");
+    assert_eq!(
+        tip.miner_pk_hash.unwrap(),
+        mining_pkh_1,
+        "RL1 did not win the sortition"
+    );
+
+    // Wait for RL1 to mine the tenure change block
+    wait_for(30, || {
+        Ok(
+            test_observer::get_blocks().last().unwrap()["burn_block_height"]
+                .as_u64()
+                .unwrap()
+                == burn_height_after,
+        )
+    })
+    .expect("Timed out waiting for RL1 to mine the tenure change block");
+
+    // Make the miner stall before broadcasting the block once it has been approved
+    TEST_P2P_BROADCAST_STALL.set(true);
+    // Make the signers not broadcast the block once it has been approved
+    TEST_SKIP_BLOCK_BROADCAST.set(true);
+
+    // submit a tx so that the miner will mine a stacks block
+    let sender_nonce = 0;
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    let tx = submit_tx(&http_origin, &transfer_tx);
+    info!("Submitted tx {tx} to mine block N");
+
+    // Wait for the block with this transfer to be accepted
+    wait_for(30, || {
+        Ok(test_observer::get_mined_nakamoto_blocks()
+            .last()
+            .unwrap()
+            .tx_events
+            .iter()
+            .any(|t| {
+                let TransactionEvent::Success(TransactionSuccessEvent { txid, .. }) = t else {
+                    return false;
+                };
+                txid.to_hex() == tx
+            }))
+    })
+    .expect("Timed out waiting for the transfer tx to be mined");
+
+    let blocks = test_observer::get_mined_nakamoto_blocks();
+    let block_n = blocks.last().expect("No blocks mined");
+    signer_test
+        .wait_for_block_acceptance(30, &block_n.signer_signature_hash, &all_signers)
+        .expect("Timed out waiting for block acceptance of N");
+    info!("Block N is {}", block_n.stacks_height);
+
+    info!("------------------------- RL2 Wins Sortition -------------------------");
+    let rl2_commits_before = commits_submitted_rl2.load(Ordering::SeqCst);
+    let burn_height_before = get_burn_height();
+
+    info!("Unpausing commits from RL2");
+    skip_commit_op_rl2.set(false);
+
+    info!("Waiting for commits from RL2");
+    wait_for(30, || {
+        Ok(
+            commits_submitted_rl2.load(Ordering::SeqCst) > rl2_commits_before
+                && commits_submitted_rl2_last_burn_height.load(Ordering::SeqCst)
+                    >= burn_height_before,
+        )
+    })
+    .expect("Timed out waiting for miner 2 to submit a commit op");
+
+    info!("Pausing commits from RL2");
+    skip_commit_op_rl2.set(true);
+
+    info!("Make signers ignore all block proposals, so that they don't reject them quickly");
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers.clone());
+
+    let burn_height_before = get_burn_height();
+    let proposals_before = proposed_blocks_rl2.load(Ordering::SeqCst);
+    info!("Mine RL2 Tenure");
+    next_block_and(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        30,
+        || Ok(get_burn_height() > burn_height_before),
+    )
+    .unwrap();
+
+    wait_for_chains().expect("Timed out waiting for RL1 and RL2 chains to advance");
+    let sortdb = conf.get_burnchain().open_sortition_db(true).unwrap();
+    let tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+    // make sure the tenure was won by RL2
+    assert!(tip.sortition, "No sortition was won");
+    assert_eq!(
+        tip.miner_pk_hash.unwrap(),
+        mining_pkh_2,
+        "RL2 did not win the sortition"
+    );
+
+    info!("------------------------- RL2 Proposes Block N' -------------------------");
+
+    wait_for(30, || {
+        Ok(proposed_blocks_rl2.load(Ordering::SeqCst) > proposals_before)
+    })
+    .expect("Timed out waiting for the block proposal from RL2");
+
+    info!("------------------------- Block N is Announced -------------------------");
+
+    TEST_BROADCAST_PROPOSAL_STALL.set(true);
+    TEST_P2P_BROADCAST_STALL.set(false);
+    let proposals_before = proposed_blocks_rl2.load(Ordering::SeqCst);
+
+    // Wait for RL2's tip to advance to the last block
+    wait_for(30, || {
+        let Some(chain_info_2) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(chain_info_2.stacks_tip_height == block_n.stacks_height)
+    })
+    .expect("Timed out waiting for RL2 to advance to block N");
+
+    info!("------------------------- RL2 Proposes Block N+1 -------------------------");
+    // Miner 2 should be interrupted from waiting for N' to be accepted when it sees N
+
+    info!("Stop signers from ignoring proposals");
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(Vec::new());
+    TEST_BROADCAST_PROPOSAL_STALL.set(false);
+
+    wait_for(30, || {
+        Ok(proposed_blocks_rl2.load(Ordering::SeqCst) > proposals_before)
+    })
+    .expect("Timed out waiting for the new block proposal from RL2");
+
+    info!("------------------------- Signers Accept Block N+1 -------------------------");
+
+    wait_for(30, || {
+        let Some(chain_info_2) = get_chain_info_opt(&conf_node_2) else {
+            return Ok(false);
+        };
+        Ok(chain_info_2.stacks_tip_height == block_n.stacks_height + 1)
+    })
+    .expect("Timed out waiting for RL2 to advance to block N+1");
+
+    info!("------------------------- Next Tenure Builds on N+1 -------------------------");
+
+    let rl1_commits_before = commits_submitted_rl1.load(Ordering::SeqCst);
+    let rl2_commits_before = commits_submitted_rl2.load(Ordering::SeqCst);
+
+    skip_commit_op_rl1.set(false);
+    skip_commit_op_rl2.set(false);
+
+    // Wait for both miners to submit block commits
+    wait_for(30, || {
+        Ok(
+            commits_submitted_rl1.load(Ordering::SeqCst) > rl1_commits_before
+                && commits_submitted_rl2.load(Ordering::SeqCst) > rl2_commits_before,
+        )
+    })
+    .expect("Timed out waiting for miners to submit block commits");
+
+    next_block_and_process_new_stacks_block(
+        &mut signer_test.running_nodes.btc_regtest_controller,
+        30,
+        &signer_test.running_nodes.coord_channel,
+    )
+    .expect("Timed out waiting for the next block to be mined");
+
+    wait_for(30, || {
+        let Some(chain_info) = get_chain_info_opt(&conf) else {
+            return Ok(false);
+        };
+        Ok(chain_info.stacks_tip_height == block_n.stacks_height + 2)
+    })
+    .expect("Timed out waiting for height to advance to block N+2");
+
+    wait_for_chains().expect("Timed out waiting for RL2 to reach N+2");
+
+    info!("------------------------- Shutdown -------------------------");
+    signer_test.shutdown();
+}